Example #1
 def order_to_path(self, tn, order):
     tn_copy = tn.copy()
     tn_copy.fix()
     edges_list = tn_copy.edges_by_name
     nodes_list = tn_copy.nodes_by_name
     lhs = [
         ''.join(
             opt_einsum.get_symbol(edges_list.index(e))
             for e in tn_copy.network.nodes[(0, n)]['edges'])
         for n in nodes_list
     ]
     rhs = ''.join(
         opt_einsum.get_symbol(edges_list.index(e))
         for e in tn_copy.open_edges)
     path = []
     for o in order:
         i, j = nodes_list.index(o[0][0]), nodes_list.index(o[0][1])
         path.append((i, j))
         nodes_list.pop(max(i, j))
         nodes_list.pop(min(i, j))
         nodes_list.append(o[1])
     return ','.join(lhs) + '->' + rhs, path, {
         opt_einsum.get_symbol(edges_list.index(e)): e
         for e in edges_list
     }
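
For reference, opt_einsum.get_symbol maps an integer index to a unique subscript label. A minimal sketch of its behavior (single ASCII letters cover the first 52 indices, Unicode symbols are used beyond that):

import opt_einsum

# 'a'-'z' cover indices 0-25, 'A'-'Z' cover 26-51 ...
assert opt_einsum.get_symbol(0) == 'a'
assert opt_einsum.get_symbol(25) == 'z'
assert opt_einsum.get_symbol(26) == 'A'
assert opt_einsum.get_symbol(51) == 'Z'
# ... after which get_symbol falls back to Unicode characters.
print(opt_einsum.get_symbol(52))  # 'À'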
Example #2
 def subscripts(self, nodes=None):
     if nodes is None:
         nodes = list(self.nodes_by_name)
     edges = [
         e for e in self.edges_by_name
         if 'fix_to' not in self.network.nodes[(1, e)]
     ]
     lhs = []
     shapes = []
     for node in nodes:
         lhs.append(''.join([
             opt_einsum.get_symbol(edges.index(e))
             for e in self.network.nodes[(0, node)]['edges'] if e in edges
         ]))
         shapes.append([
             2 if self.network.nodes[(1, e)]['dim'] is None
             else self.network.nodes[(1, e)]['dim']
             for e in self.network.nodes[(0, node)]['edges']
             if e in edges
         ])
     rhs = ''.join([
         opt_einsum.get_symbol(edges.index(e)) for e in self.open_edges
         if e in edges
     ])
     return lhs, rhs, shapes
Example #3
    def symbolize_dims(self, plate_to_symbol=None):
        """
        Assign unique symbols to all tensor dimensions.
        """
        plate_to_symbol = {} if plate_to_symbol is None else plate_to_symbol
        symbol_to_dim = {}
        for site in self.nodes.values():
            if site["type"] != "sample":
                continue

            # allocate even symbols for plate dims
            dim_to_symbol = {}
            for frame in site["cond_indep_stack"]:
                if frame.vectorized:
                    if frame.name in plate_to_symbol:
                        symbol = plate_to_symbol[frame.name]
                    else:
                        symbol = opt_einsum.get_symbol(2 * len(plate_to_symbol))
                        plate_to_symbol[frame.name] = symbol
                    symbol_to_dim[symbol] = frame.dim
                    dim_to_symbol[frame.dim] = symbol

            # allocate odd symbols for enum dims
            for dim, id_ in site["infer"].get("_dim_to_id", {}).items():
                symbol = opt_einsum.get_symbol(1 + 2 * id_)
                symbol_to_dim[symbol] = dim
                dim_to_symbol[dim] = symbol
            enum_dim = site["infer"].get("_enumerate_dim")
            if enum_dim is not None:
                site["infer"]["_enumerate_symbol"] = dim_to_symbol[enum_dim]
            site["infer"]["_dim_to_symbol"] = dim_to_symbol

        self.plate_to_symbol = plate_to_symbol
        self.symbol_to_dim = symbol_to_dim
Example #4
def create_sycamore_row(start_idx, indices):
    einsum_str = ""
    # First row of 2-qubit gates
    for i in range(0, N, 2):
        # set the four indices
        # indices(i)-------start_idx+i
        #              |X|
        # indices(i+1)-----start_idx+i+1
        ul, ll, ur, lr = (oe.get_symbol(k)
                          for k in (indices[i], indices[i + 1], start_idx + i,
                                    start_idx + i + 1))
        einsum_str += "{}{}{}{},".format(ul, ll, ur, lr)
    right_idx = [start_idx]

    # second row
    for i in range(0, N - 3, 2):
        # set the four indices
        # start_idx+i+1-----------start_idx+i+N
        #                  |X|
        # start_idx+i+2----------start_idx+i+N+1
        ul, ll, ur, lr = (oe.get_symbol(k)
                          for k in (start_idx + i + 1, start_idx + i + 2,
                                    start_idx + i + N, start_idx + i + N + 1))
        einsum_str += "{}{}{}{},".format(ul, ll, ur, lr)
        right_idx += [start_idx + i + N, start_idx + i + N + 1]
    # compute final column of indices
    right_idx += [start_idx + N - 1]

    return einsum_str, right_idx
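
A worked instance (a sketch: N, the module-level qubit count the function reads, is assumed here to be 4, and oe is opt_einsum):

N = 4  # hypothetical module-level qubit count assumed by the function
einsum_str, right_idx = create_sycamore_row(4, [0, 1, 2, 3])
print(einsum_str)  # 'abef,cdgh,fgij,'
print(right_idx)   # [4, 8, 9, 7]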
Example #5
def make_chain_einsum(num_steps):
    inputs = [str(opt_einsum.get_symbol(0))]
    for t in range(num_steps):
        inputs.append(
            str(opt_einsum.get_symbol(t)) + str(opt_einsum.get_symbol(t + 1)))
    equation = ",".join(inputs) + "->"
    return equation
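
A quick usage sketch; the expected output follows directly from the code above:

# An initial vector chained through num_steps transition matrices,
# contracted down to a scalar:
print(make_chain_einsum(3))  # 'a,ab,bc,cd->'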
Example #6
 @classmethod
 def from_order(cls, tn, order):
     tn_copy = tn.copy()
     tree_dic = {}
     edge_list = list(e[1] for e in tn_copy.edges)
     for node in tn_copy.nodes:
         tree_dic[node[1]] = cls(
             subscripts=''.join(
                 opt_einsum.get_symbol(edge_list.index(e))
                 for e in tn_copy.network.nodes[node]['edges']),
             name=node[1])
     for o in order:
         stn_name, _ = tn_copy.encapsulate(o[0], stn_name=o[1])
         tree_dic[stn_name] = cls(
             subscripts=''.join(
                 opt_einsum.get_symbol(edge_list.index(e))
                 for e in tn_copy.network.nodes[(0, stn_name)]['edges']),
             name=o[1],
             children=[tree_dic[i] for i in o[0]])
     return tree_dic['#']
Example #7
def test_chain_sharing(size, backend):
    xs = [np.random.rand(2, 2) for _ in range(size)]
    alphabet = ''.join(get_symbol(i) for i in range(size + 1))
    names = [alphabet[i:i + 2] for i in range(size)]
    inputs = ','.join(names)

    num_exprs_nosharing = 0
    for i in range(size + 1):
        with shared_intermediates() as cache:
            target = alphabet[i]
            eq = '{}->{}'.format(inputs, target)
            expr = contract_expression(eq, *(x.shape for x in xs))
            expr(*xs, backend=backend)
            num_exprs_nosharing += _compute_cost(cache)

    with shared_intermediates() as cache:
        print(inputs)
        for i in range(size + 1):
            target = alphabet[i]
            eq = '{}->{}'.format(inputs, target)
            path_info = contract_path(eq, *xs)
            print(path_info[1])
            expr = contract_expression(eq, *(x.shape for x in xs))
            expr(*xs, backend=backend)
        num_exprs_sharing = _compute_cost(cache)

    print('-' * 40)
    print('Without sharing: {} expressions'.format(num_exprs_nosharing))
    print('With sharing: {} expressions'.format(num_exprs_sharing))
    assert num_exprs_nosharing > num_exprs_sharing
Example #9
def symbol_generate(base_map):
    if isinstance(base_map, dict):
        base_map = base_map.values()
    for i in range(100):
        symbol = get_symbol(i)
        if symbol not in base_map:
            yield symbol
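
Usage sketch: the generator skips any symbol already present in base_map.

gen = symbol_generate({'x': 'a', 'y': 'c'})
print(next(gen), next(gen))  # b d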
Example #10
def test_large_path(num_symbols):
    symbols = ''.join(oe.get_symbol(i) for i in range(num_symbols))
    dimension_dict = dict(zip(symbols, itertools.cycle([2, 3, 4])))
    expression = ','.join(symbols[t:t + 2] for t in range(num_symbols - 1))
    tensors = oe.helpers.build_views(expression, dimension_dict=dimension_dict)

    # Check that path construction does not crash
    oe.contract_path(expression, *tensors, optimize='greedy')
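
For intuition, the test contracts a linear chain of pairwise-overlapping terms; a small instance of the expression it builds (a sketch, with oe standing for opt_einsum as above):

import opt_einsum as oe

symbols = ''.join(oe.get_symbol(i) for i in range(4))
print(','.join(symbols[t:t + 2] for t in range(3)))  # 'ab,bc,cd'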
Example #12
    def get_state_vector(self):
        # l: the maximum number of indices one qubit may use
        l = len(self._gates) * 2 + 2
        cont_shapes = []
        cont_indexes = []
        qubit_indexes = [l * i for i in range(self._qubits_num)]

        # add qubit_tensor
        for st in self._init_state:
            cont_shapes.append([2**len(st.input_qubits)])
            index_str = ""
            for q in st.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
            cont_indexes.append(index_str)

        # add gate_tensor
        for gate in self._gates:
            cont_shapes.append(gate.shape)
            index_str = ""
            for q in gate.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
                qubit_indexes[q] += 1
            for q in gate.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
            cont_indexes.append(index_str)

        cont_str = ""
        for i in range(len(cont_indexes)):
            if i != 0:
                cont_str += ","
            cont_str += cont_indexes[i]
        cont_out_str = ""
        for i in range(self._qubits_num):
            cont_out_str += oe.get_symbol(qubit_indexes[i])
        cont_str = cont_str + "->" + cont_out_str

        self.__set_gate_params()
        cont_tensors = self.__get_forward_tensor(None, None, self._gate_params)

        return oe.contract(cont_str, *cont_tensors).reshape(-1)
Example #13
def test_sequential_logmatmulexp(batch_shape, state_dim, num_steps):
    logits = torch.randn(batch_shape + (num_steps, state_dim, state_dim))
    actual = _sequential_logmatmulexp(logits)
    assert actual.shape == batch_shape + (state_dim, state_dim)

    # Check against einsum.
    operands = list(logits.unbind(-3))
    symbol = (opt_einsum.get_symbol(i) for i in range(1000))
    batch_symbols = ''.join(next(symbol) for _ in batch_shape)
    state_symbols = [next(symbol) for _ in range(num_steps + 1)]
    equation = (','.join(batch_symbols + state_symbols[t] + state_symbols[t + 1]
                         for t in range(num_steps)) +
                '->' + batch_symbols + state_symbols[0] + state_symbols[-1])
    expected = opt_einsum.contract(equation, *operands, backend='pyro.ops.einsum.torch_log')
    assert_close(actual, expected)
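
With batch_shape = () and num_steps = 2 the generated equation reduces to an ordinary matrix product (evaluated in log space above); a minimal re-derivation:

import opt_einsum

symbol = (opt_einsum.get_symbol(i) for i in range(1000))
state_symbols = [next(symbol) for _ in range(3)]
equation = (','.join(state_symbols[t] + state_symbols[t + 1]
                     for t in range(2)) +
            '->' + state_symbols[0] + state_symbols[-1])
print(equation)  # 'ab,bc->ac'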
Example #14
def test_chain_2(size, backend):
    xs = [np.random.rand(2, 2) for _ in range(size)]
    shapes = [x.shape for x in xs]
    alphabet = ''.join(get_symbol(i) for i in range(size + 1))
    names = [alphabet[i:i + 2] for i in range(size)]
    inputs = ','.join(names)

    with shared_intermediates():
        print(inputs)
        for i in range(size):
            target = alphabet[i:i + 2]
            eq = '{}->{}'.format(inputs, target)
            path_info = contract_path(eq, *xs)
            print(path_info[1])
            expr = contract_expression(eq, *shapes)
            expr(*xs, backend=backend)
        print('-' * 40)
Example #16
def rand_reg_contract(n, deg, seed=None):
    import networkx as nx

    rG = nx.random_regular_graph(deg, n, seed=seed)
    edge2ind = {
        tuple(sorted(e)): oe.get_symbol(i)
        for i, e in enumerate(rG.edges)
    }

    inputs = [{edge2ind[tuple(sorted(e))]
               for e in rG.edges(nd)} for nd in rG.nodes]
    output = set()  # no open indices in the closed network

    eq = (",".join(["".join(i)
                    for i in inputs]) + "->{}".format("".join(output)))
    shapes = [(2, ) * deg] * n
    views = list(map(Shaped, shapes))

    return eq, shapes, views, inputs, output
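
Shaped is not defined in the snippet; a minimal stand-in (an assumption, mirroring the shape-only dummies used in opt_einsum's test suite) would be:

from collections import namedtuple

# Dummy "array" exposing only .shape: enough for contraction-path
# finding, with no data allocated.
Shaped = namedtuple('Shaped', ['shape'])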
Example #17
def make_plated_hmm_einsum(num_steps, num_obs_plates=1, num_hidden_plates=0):

    assert num_obs_plates >= num_hidden_plates
    t0 = num_obs_plates + 1

    obs_plates = ''.join(opt_einsum.get_symbol(i) for i in range(num_obs_plates))
    hidden_plates = ''.join(opt_einsum.get_symbol(i) for i in range(num_hidden_plates))

    inputs = [str(opt_einsum.get_symbol(t0))]
    for t in range(t0, num_steps+t0):
        inputs.append(str(opt_einsum.get_symbol(t)) + str(opt_einsum.get_symbol(t+1)) + hidden_plates)
        inputs.append(str(opt_einsum.get_symbol(t+1)) + obs_plates)
    equation = ",".join(inputs) + "->"
    return (equation, ''.join(set(obs_plates + hidden_plates)))
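
A small worked instance; the values follow directly from the code above:

eq, plates = make_plated_hmm_einsum(num_steps=2, num_obs_plates=1)
print(eq)      # 'c,cd,da,de,ea->'
print(plates)  # 'a'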
Example #18
def test_chain_2_growth(backend):
    sizes = list(range(1, 21))
    costs = []
    for size in sizes:
        xs = [np.random.rand(2, 2) for _ in range(size)]
        alphabet = ''.join(get_symbol(i) for i in range(size + 1))
        names = [alphabet[i:i+2] for i in range(size)]
        inputs = ','.join(names)

        with shared_intermediates() as cache:
            for i in range(size):
                target = alphabet[i:i+2]
                eq = '{}->{}'.format(inputs, target)
                expr = contract_expression(eq, *(x.shape for x in xs))
                expr(*xs, backend=backend)
            costs.append(_compute_cost(cache))

    print('sizes = {}'.format(repr(sizes)))
    print('costs = {}'.format(repr(costs)))
    for size, cost in zip(sizes, costs):
        print('{}\t{}'.format(size, cost))
Example #20
    def get_full_init_state(self):
        if self._full_init_state is None:
            cont_tensors = []
            cont_str = ""
            for idx, st in enumerate(self._init_state):
                if idx != 0:
                    cont_str += ","
                if st.tensor is None:
                    print("Warning: some of the initial states are not defined yet.")
                    return None
                cont_tensors.append(
                    st.tensor.reshape([2
                                       for i in range(len(st.input_qubits))]))
                index_str = ""
                for q in st.input_qubits:
                    index_str += oe.get_symbol(q)
                cont_str += index_str

            self._full_init_state = oe.contract(cont_str,
                                                *cont_tensors).flatten()

        return self._full_init_state
Example #21
    def __create_m_expr(self, midx):
        # TODO: use causal cone
        # l: the maximum number of indices one qubit may use
        l = 2 * max(self._layer_nums) + 2
        cont_shapes = []
        cont_indexes = []
        qubit_indexes = [l * i for i in range(self._qubits_num)]

        # add qubit_tensor
        for st in self._init_state:
            cont_shapes.append([2**len(st.input_qubits)])
            index_str = ""
            for q in st.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
            cont_indexes.append(index_str)

        # add gate_tensor
        for gate in self._gates:
            cont_shapes.append(gate.shape)
            index_str = ""
            for q in gate.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
                qubit_indexes[q] += 1
            for q in gate.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
            cont_indexes.append(index_str)

        # add measurement
        cont_shapes.append(self._measurements[midx].shape)
        index_str = ""
        for q in self._measurements[midx].input_qubits:
            index_str += oe.get_symbol(qubit_indexes[q])
            qubit_indexes[q] += 1
        for q in self._measurements[midx].input_qubits:
            index_str += oe.get_symbol(qubit_indexes[q])
        cont_indexes.append(index_str)

        # add adjoint gate_tensor
        for gate in reversed(self._gates):
            cont_shapes.append(gate.shape)
            index_str = ""
            for q in gate.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
                qubit_indexes[q] += 1
            for q in gate.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
            cont_indexes.append(index_str)

        # add adjoint qubit_tensor
        for st in self._init_state:
            cont_shapes.append([2**len(st.input_qubits)])
            index_str = ""
            for q in st.input_qubits:
                index_str += oe.get_symbol(qubit_indexes[q])
            cont_indexes.append(index_str)

        cont_str = ""
        for i in range(len(cont_indexes)):
            if i != 0:
                cont_str += ","
            cont_str += cont_indexes[i]

        return oe.contract_expression(cont_str, *cont_shapes)
Example #22
def combine_gates(gates):
    input_qubits = []
    qubit_indexes = []
    cont_indexes = []
    cont_shapes = []
    params = []
    for gate in gates:
        cont_shapes.append(gate.shape)
        index_str = ""
        for q in gate.input_qubits:
            if q not in input_qubits:
                input_qubits.append(q)
                qubit_indexes.append(2 * len(gates) * (len(input_qubits) - 1))
            ind = input_qubits.index(q)
            index_str += oe.get_symbol(qubit_indexes[ind])
            qubit_indexes[ind] += 1
        for q in gate.input_qubits:
            ind = input_qubits.index(q)
            index_str += oe.get_symbol(qubit_indexes[ind])
        cont_indexes.append(index_str)
        if gate.params is not None:
            params.extend(gate.params)

    cont_str = ""
    for i in range(len(cont_indexes)):
        if i != 0:
            cont_str += ","
        cont_str += cont_indexes[i]

    cont_out_str = ""
    for i in range(len(input_qubits)):
        cont_out_str += oe.get_symbol(2 * len(gates) * i)
    for i in range(len(input_qubits)):
        cont_out_str += oe.get_symbol(qubit_indexes[i])
    cont_str = cont_str + "->" + cont_out_str

    if not params:  # 'params' is a list here, never None; check for emptiness
        cont_tensors = []
        for gate in gates:
            cont_tensors.append(gate.tensor.T.reshape(gate.shape))
        tensor = oe.contract(cont_str,
                             *cont_tensors).reshape(2**len(input_qubits), -1).T
        gate = qtc.Gate(input_qubits, tensor=tensor)
        return gate

    else:

        def func(params):
            cont_tensors = []
            idx = 0
            for gate in gates:
                if gate.params is not None:
                    cont_tensors.append(
                        gate.get_tensor_from_params(
                            params[idx:idx + len(gate.params)]).T.reshape(
                                gate.shape))
                    idx += len(gate.params)
                else:
                    cont_tensors.append(gate.tensor.T.reshape(gate.shape))
            return oe.contract(cont_str,
                               *cont_tensors).reshape(2**len(input_qubits),
                                                      -1).T

        func_jit = jit(func)
        params = np.array(params)
        gate = qtc.Gate(input_qubits, params=params, func=func_jit)
        return gate
Example #23
phys_dim = 3
bond_dim = 10

# start with first site
# O--
# |
# O--
einsum_str = "ab,ac,"

for i in range(1, n - 1):
    # set the upper left/right, middle and lower left/right indices
    # --O--
    #   |
    # --O--
    j = 3 * i
    ul, ur, m, ll, lr = (oe.get_symbol(k)
                         for k in (j - 1, j + 2, j, j - 2, j + 1))
    einsum_str += "{}{}{},{}{}{},".format(m, ul, ur, m, ll, lr)

# finish with last site
# --O
#   |
# --O
i = n - 1
j = 3 * i
ul, m, ll = (oe.get_symbol(k) for k in (j - 1, j, j - 2))
einsum_str += "{}{},{}{}".format(m, ul, m, ll)


def gen_shapes():
    yield (phys_dim, bond_dim)
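
As a worked instance of the string assembled above (n, like phys_dim and bond_dim, is assumed to be a module-level parameter):

# With n = 3:
#   einsum_str == 'ab,ac,dcf,dbe,gf,ge'
# i.e. the <bra|ket> overlap of a 3-site MPS, where 'a', 'd' and 'g'
# are the shared physical indices and the rest are bond indices.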
Example #24
def sycamore_circuit_simulation():
    bipartition_filename = join(
        PACKAGEDIR,
        'slicing_order/bipartition_n{}_m{}_s{}_{}_s2{}_{}.txt'.format(
            args.n, args.m, args.seed, args.seq, args.seed2,
            'simplify_' if args.simplify else ''))
    sliced_edges_filename = join(
        PACKAGEDIR,
        'slicing_order/n{}_m{}_s{}_{}_s2{}_{}gpulimit_{}_edges.txt'.format(
            args.n, args.m, args.seed, args.seq, args.seed2,
            'simplify_' if args.simplify else '', args.gpu_limit))
    bipartition_order_filename = join(
        PACKAGEDIR,
        'slicing_order/n{}_m{}_s{}_{}_s2{}_{}gpulimit_{}_ordernew.txt'.format(
            args.n, args.m, args.seed, args.seq, args.seed2,
            'simplify_' if args.simplify else '', args.gpu_limit))
    if not (exists(bipartition_filename) and exists(sliced_edges_filename)):
        tensors, labels, final_qubits_representation = get_tensors(
            args.n, args.m, seq=args.seq, simplify=args.simplify)
        tn = Contraction(tensors, labels)
        results = tn.bipartition(args.sc, args.tc, seed=args.seed2)
        if results is not None:
            tc, sc, tcs, scs, _ = tn.contraction(order=results[0].copy())
            print("Order found, tc: {:.5f}, sc: {:.1f}".format(tc, sc))
            print("tc of each steps: ", tcs)
            print("sc of each steps: ", scs)
        else:
            print("Desired order not found.")
            exit(0)

        group = results[2]
        head_idx = 0 if len(group[0]) > len(group[1]) else 1
        group_head = group[head_idx]
        final_qubits_num = [[], []]
        for i in range(2):
            for node in group[i]:
                if node in final_qubits_representation.keys():
                    final_qubits_num[i] += final_qubits_representation[node]
        print('initial partition:', group)
        print('qubits of each partition', final_qubits_num)
        print('qubits num',
              (len(final_qubits_num[0]), len(final_qubits_num[1])))
        print('cut size of initial partition', results[3])

        # bipartition_order_filename = join(PACKAGEDIR, 'slicing_order/bipartition_n{}_m{}_s{}_{}_s2{}_{}.txt'.format(
        #     args.n, args.m, args.seed, args.seq, args.seed2, 'simplify_' if args.simplify else ''
        # ))

        if min(len(final_qubits_num[0]), len(final_qubits_num[1])) > 10:
            write_community(group, bipartition_filename)
            order_sliced, slicing_edges = dynamic_slicing(
                tn,
                group_head,
                results[0].copy(),
                seed_order=args.seed2,
                random_init=False)
            order_sliced_new = [(group_head.index(i), group_head.index(j))
                                for i, j in order_sliced]
            write_order(order_sliced_new, bipartition_order_filename)
            write_order(slicing_edges, sliced_edges_filename)
            num_sliced_edges = len(slicing_edges)
            # for trial in range(20):
            #     result_slicing = dynamic_slicing(tc+0.5, sc, args.n, args.m, args.seed, args.seq, args.simplify, args.gpu_limit, args.seed2)
            #     if result_slicing is not None:
            #         num_sliced_edges = result_slicing
            #         break
        else:
            print(
                'num of open qubits is not enough, please try another partition'
            )
            exit(1)
    else:
        num_sliced_edges = len(read_order(sliced_edges_filename))

    if not exists(PACKAGEDIR + '/slicing_components/'):
        makedirs(PACKAGEDIR + '/slicing_components/')
    if not exists(PACKAGEDIR + '/samples/'):
        makedirs(PACKAGEDIR + '/samples/')

    model = SycamoreBipartitionBatch(**vars(args))
    sliced_edges_on_cut = []
    for i in range(len(model.sliced_edges)):
        edge = model.sliced_edges[i]
        if (edge[0] in model.group_mcmc and edge[1]
                in model.group_slicing) or (edge[1] in model.group_mcmc and
                                            edge[0] in model.group_slicing):
            sliced_edges_on_cut.append(edge)
    '''
    for edge in sliced_edges_on_cut:
        model.sliced_edges.remove(edge)
        model.sliced_edges.insert(0, edge)
    '''
    if not exists(model.data_filename):
        subtask_filename = join(
            PACKAGEDIR,
            'slicing_components/n{}_m{}_s{}_{}_s2{}_sm{}_{}gpulimit_{}_data{}.pt'
            .format(args.n, args.m, args.seed, args.seq, args.seed2,
                    args.seed_mcmc, 'simplify_' if args.simplify else '',
                    args.gpu_limit,
                    '_complex128' if args.complex128 else '_complex64'))
        subtask_completed = 0
        if num_sliced_edges - len(sliced_edges_on_cut) <= args.task_num:
            args.task_num = num_sliced_edges - len(sliced_edges_on_cut)
        subtask_all = num_sliced_edges - args.task_num
        for i in range(2**subtask_all):
            # str.strip('.pt') strips characters, not a suffix; slice off
            # the '.pt' extension instead
            if exists(subtask_filename[:-3] + '_{}.pt'.format(i)):
                subtask_completed += 1

        if subtask_completed <= 2**subtask_all:
            for i in range(subtask_completed, 2**subtask_all):
                model.slicing_subtasks(i, task_num=args.task_num)

        num = 0
        cut_vector_filename = join(
            PACKAGEDIR,
            'samples/n{}_m{}_s{}_{}_s2{}_sm{}_{}gpulimit_{}_data_{}.pt'.format(
                args.n, args.m, args.seed, args.seq, args.seed2,
                args.seed_mcmc, 'simplify_' if args.simplify else '',
                args.gpu_limit,
                'complex128' if args.complex128 else 'complex64'))
        for i in range(2**subtask_all):
            if not exists(subtask_filename[:-3] + '_{}.pt'.format(i)):
                print('{} not exist'.format(i))
            else:
                num += 1
        if num == 2**subtask_all:
            for i in range(2**subtask_all):
                tensor_slicing, t_all, t_gpu, eq_slicing = torch.load(
                    subtask_filename[:-3] + '_{}.pt'.format(i))
                if i == 0:
                    tensor_slicing_fixed = torch.zeros(
                        [2] * (len(sliced_edges_on_cut) + len(eq_slicing)),
                        dtype=torch.complex128).reshape(
                            2**len(sliced_edges_on_cut), -1)
                j = i // 2**(subtask_all - len(sliced_edges_on_cut))
                tensor_slicing_fixed[j] += tensor_slicing.reshape(-1)
            tensor_slicing_fixed = tensor_slicing_fixed.reshape(
                [2] * (len(sliced_edges_on_cut) + len(eq_slicing)))
            eq_slicing_fixed = ''
            for i in range(len(sliced_edges_on_cut)):
                eq_slicing_fixed += oe.get_symbol(
                    model.model.edges.index(model.sliced_edges[i]))
            eq_slicing_fixed += eq_slicing
        torch.save((tensor_slicing_fixed, eq_slicing_fixed),
                   cut_vector_filename)
    if not exists(model.samples_filename):
        samples_batch, _ = model.batch_sampling()
    else:
        samples_batch = read_samples(model.samples_filename)

    return samples_batch
Example #25
def merge(a: Gate, *bs) -> Gate:
    """
    Merge gate `a` with the gates `bs`. The merged `Gate` will be equivalent
    to applying
    ```
    new_psi = bs.matrix() @ ... @ b.matrix() @ a.matrix() @ psi
    ```
    with `psi` a quantum state.

    Parameters
    ----------
    a, ...: Gate
        `Gate`s to merge.
    qubits_order: iter[any], optional
        If provided, qubits in new `Gate` will be sorted using `qubits_order`.

    Returns
    -------
    Gate('MATRIX')
        The merged `Gate`
    """
    # If no other gates are provided, return
    if len(bs) == 0:
        return a

    # Pop first gate
    b, bs = bs[0], bs[1:]

    # Check
    if any(not x.provides(['matrix', 'qubits']) or x.qubits is None
           for x in [a, b]):
        raise ValueError(
            "Both 'a' and 'b' must provide 'qubits' and 'matrix'.")

    # Get unitaries
    Ua, Ub = a.matrix(), b.matrix()

    # Get shared qubits
    shared_qubits = set(a.qubits).intersection(b.qubits)
    all_qubits = b.qubits + tuple(q for q in a.qubits if q not in b.qubits)

    # Get sizes
    n_a = len(a.qubits)
    n_b = len(b.qubits)
    n_ab = len(shared_qubits)
    n_c = len(all_qubits)

    if shared_qubits:
        from opt_einsum import get_symbol, contract
        # Build map
        _map_b_l = ''.join(get_symbol(x) for x in range(n_b))
        _map_b_r = ''.join(get_symbol(x + n_b) for x in range(n_b))
        _map_a_l = ''.join(_map_b_r[b.qubits.index(q)] if q in
                           shared_qubits else get_symbol(x + 2 * n_b)
                           for x, q in enumerate(a.qubits))
        _map_a_r = ''.join(get_symbol(x + 2 * n_b + n_a) for x in range(n_a))
        _map_c_l = ''.join(_map_b_l[b.qubits.index(q)] if q in
                           b.qubits else _map_a_l[a.qubits.index(q)]
                           for q in all_qubits)
        _map_c_r = ''.join(
            _map_b_r[b.qubits.index(q)] if q in b.qubits and
            q not in shared_qubits else _map_a_r[a.qubits.index(q)]
            for q in all_qubits)
        _map = _map_b_l + _map_b_r + ',' + _map_a_l + _map_a_r + '->' + _map_c_l + _map_c_r

        # Get matrix
        U = np.reshape(
            contract(_map, np.reshape(Ub, (2,) * 2 * n_b),
                     np.reshape(Ua, (2,) * 2 * n_a)), (2**n_c, 2**n_c))
    else:
        # Get matrix
        U = np.kron(Ub, Ua)

    # Get merged gate
    gate = Gate('MATRIX', qubits=all_qubits, U=U)

    # Iteratively call merge
    if len(bs) == 0:
        return gate
    else:
        return merge(gate, *bs)
Example #26
def _simulate_tn(circuit: any, initial_state: any, final_state: any,
                 optimize: any, backend: any, complex_type: any,
                 tensor_only: bool, verbose: bool, **kwargs):
    import quimb.tensor as tn
    import cotengra as ctg

    # Get random leaves_prefix
    leaves_prefix = ''.join(
        np.random.choice(list('abcdefghijklmnopqrstuvwxyz'), size=20))

    # Initialize info
    _sim_info = {}

    # Alias for tn
    if optimize == 'tn':
        optimize = 'cotengra'

    if isinstance(circuit, Circuit):

        # Get number of qubits
        qubits = circuit.all_qubits()
        n_qubits = len(qubits)

        # If initial/final state is None, set to all .'s
        initial_state = '.' * n_qubits if initial_state is None else initial_state
        final_state = '.' * n_qubits if final_state is None else final_state

        # Initial and final states must be valid strings
        for state, sname in [(initial_state, 'initial_state'),
                             (final_state, 'final_state')]:
            # Get alphabet
            from string import ascii_letters

            # Check if string
            if not isinstance(state, str):
                raise ValueError(f"'{sname}' must be a valid string.")

            # Deprecated error
            if any(x in 'xX' for x in state):
                from hybridq.utils import DeprecationWarning
                from warnings import warn

                # Warn the user that '.' is used to represent open qubits
                warn(
                    "Since '0.6.3', letters in the alphabet are used to "
                    "trace selected qubits (including 'x' and 'X'). "
                    "Instead, '.' is used to represent an open qubit.",
                    DeprecationWarning)

            # Check only valid symbols are present
            if set(state).difference('01+-.' + ascii_letters):
                raise ValueError(f"'{sname}' contains invalid symbols.")

            # Check number of qubits
            if len(state) != n_qubits:
                raise ValueError(f"'{sname}' has the wrong number of qubits "
                                 f"(expected {n_qubits}, got {len(state)})")

        # Check memory
        if 2**(initial_state.count('.') +
               final_state.count('.')) > kwargs['max_largest_intermediate']:
            raise MemoryError("Memory for the given number of open qubits "
                              "exceeds the 'max_largest_intermediate'.")

        # Compress circuit
        if kwargs['compress']:
            if verbose:
                print(
                    f"Compress circuit (max_n_qubits={kwargs['compress']}): ",
                    end='',
                    file=stderr)
                _time = time()

            circuit = utils.compress(
                circuit,
                kwargs['compress']['max_n_qubits'] if isinstance(
                    kwargs['compress'], dict) else kwargs['compress'],
                verbose=verbose,
                **({
                    k: v
                    for k, v in kwargs['compress'].items()
                    if k != 'max_n_qubits'
                } if isinstance(kwargs['compress'], dict) else {}))

            circuit = Circuit(
                utils.to_matrix_gate(c, complex_type=complex_type)
                for c in circuit)
            if verbose:
                print(f"Done! ({time()-_time:1.2f}s)", file=stderr)

        # Get tensor network representation of circuit
        tensor, tn_qubits_map = utils.to_tn(circuit,
                                            return_qubits_map=True,
                                            leaves_prefix=leaves_prefix)

        # Define basic MPS
        _mps = {
            '0': np.array([1, 0]),
            '1': np.array([0, 1]),
            '+': np.array([1, 1]) / np.sqrt(2),
            '-': np.array([1, -1]) / np.sqrt(2)
        }

        # Attach initial/final state
        for state, ext in [(initial_state, 'i'), (final_state, 'f')]:
            for s, q in ((s, q) for s, q in zip(state, qubits) if s in _mps):
                inds = [f'{leaves_prefix}_{tn_qubits_map[q]}_{ext}']
                tensor &= tn.Tensor(_mps[s], inds=inds, tags=inds)

        # For each unique letter, apply trace
        for x in set(initial_state + final_state).difference(''.join(_mps) +
                                                             '.'):
            # Get indexes
            inds = [
                f'{leaves_prefix}_{tn_qubits_map[q]}_i'
                for s, q in zip(initial_state, qubits) if s == x
            ]
            inds += [
                f'{leaves_prefix}_{tn_qubits_map[q]}_f'
                for s, q in zip(final_state, qubits) if s == x
            ]

            # Apply trace
            tensor &= tn.Tensor(np.reshape([1] + [0] * (2**len(inds) - 2) +
                                           [1], (2, ) * len(inds)),
                                inds=inds)

        # Simplify if requested
        if kwargs['simplify_tn']:
            tensor.full_simplify_(kwargs['simplify_tn']).astype_(complex_type)
        else:
            # Otherwise, just convert to the given complex_type
            tensor.astype_(complex_type)

        # Get contraction from heuristic
        if optimize == 'cotengra' and kwargs['max_iterations'] > 0:

            # Create local client if MPI has been detected (not compatible with Dask at the moment)
            if _mpi_env and kwargs['parallel']:

                from distributed import Client, LocalCluster
                _client = Client(LocalCluster(processes=False))

            else:

                _client = None

            # Set cotengra parameters
            cotengra_params = lambda: ctg.HyperOptimizer(
                methods=kwargs['methods'],
                max_time=kwargs['max_time'],
                max_repeats=kwargs['max_repeats'],
                minimize=kwargs['minimize'],
                progbar=verbose,
                parallel=kwargs['parallel'],
                **kwargs['cotengra'])

            # Get optimized path
            opt = cotengra_params()
            info = tensor.contract(all, optimize=opt, get='path-info')

            # Get target size
            tli = kwargs['target_largest_intermediate']

            # Repeat for the requested number of iterations
            for _ in range(1, kwargs['max_iterations']):

                # Break if largest intermediate is equal or smaller than target
                if info.largest_intermediate <= tli:
                    break

                # Otherwise, restart
                _opt = cotengra_params()
                _info = tensor.contract(all, optimize=_opt, get='path-info')

                # Store the best
                if kwargs['minimize'] == 'size':

                    if _info.largest_intermediate < info.largest_intermediate or (
                            _info.largest_intermediate
                            == info.largest_intermediate
                            and _opt.best['flops'] < opt.best['flops']):
                        info = _info
                        opt = _opt

                else:

                    if _opt.best['flops'] < opt.best['flops'] or (
                            _opt.best['flops'] == opt.best['flops']
                            and _info.largest_intermediate <
                            info.largest_intermediate):
                        info = _info
                        opt = _opt

            # Close client if exists
            if _client:

                _client.shutdown()
                _client.close()

        # Just return tensor if required
        if tensor_only:
            if optimize == 'cotengra' and kwargs['max_iterations'] > 0:
                return tensor, (info, opt)
            else:
                return tensor

    else:

        # Set tensor
        tensor = circuit

        if len(optimize) == 2 and isinstance(
                optimize[0], PathInfo) and isinstance(
                    optimize[1], ctg.hyper.HyperOptimizer):

            # Get info and opt from optimize
            info, opt = optimize

            # Set optimization
            optimize = 'cotengra'

        else:

            # Get tensor and path
            tensor = circuit

    # Print some info
    if verbose:
        print(
            f'Largest Intermediate: 2^{np.log2(float(info.largest_intermediate)):1.2f}',
            file=stderr)
        print(
            f'Max Largest Intermediate: 2^{np.log2(float(kwargs["max_largest_intermediate"])):1.2f}',
            file=stderr)
        print(f'Flops: 2^{np.log2(float(info.opt_cost)):1.2f}', file=stderr)

    if optimize == 'cotengra':

        # Get indexes
        _inds = tensor.outer_inds()

        # Get input indexes and output indexes
        _i_inds = sort([x for x in _inds if x[-2:] == '_i'],
                       key=lambda x: int(x.split('_')[1]))
        _f_inds = sort([x for x in _inds if x[-2:] == '_f'],
                       key=lambda x: int(x.split('_')[1]))

        # Get order
        _inds = [_inds.index(x) for x in _i_inds + _f_inds]

        # Get slice finder
        sf = ctg.SliceFinder(info,
                             target_size=kwargs['max_largest_intermediate'])

        # Find slices
        with tqdm(kwargs['temperatures'], disable=not verbose,
                  leave=False) as pbar:
            for _temp in pbar:
                pbar.set_description(f'Find slices (T={_temp})')
                ix_sl, cost_sl = sf.search(temperature=_temp)

        # Get slice contractor
        sc = sf.SlicedContractor([t.data for t in tensor])

        # Update infos
        _sim_info.update({
            'flops': info.opt_cost,
            'largest_intermediate': info.largest_intermediate,
            'n_slices': cost_sl.nslices,
            'total_flops': cost_sl.total_flops
        })

        # Print some infos
        if verbose:
            print(
                f'Number of slices: 2^{np.log2(float(cost_sl.nslices)):1.2f}',
                file=stderr)
            print(f'Flops+Cuts: 2^{np.log2(float(cost_sl.total_flops)):1.2f}',
                  file=stderr)

        if kwargs['max_n_slices'] and sc.nslices > kwargs['max_n_slices']:
            raise RuntimeError(
                f'Too many slices ({sc.nslices} > {kwargs["max_n_slices"]})')

        # Contract tensor
        _li = np.log2(float(info.largest_intermediate))
        _mli = np.log2(float(kwargs["max_largest_intermediate"]))
        _tensor = sc.gather_slices((sc.contract_slice(
            i, backend=backend
        ) for i in tqdm(
            range(sc.nslices),
            desc=f'Contracting tensor (li=2^{_li:1.0f}, mli=2^{_mli:1.1f})',
            leave=False)))

        # Create map
        _map = ''.join([get_symbol(x) for x in range(len(_inds))])
        _map += '->'
        _map += ''.join([get_symbol(x) for x in _inds])

        # Reorder tensor
        tensor = contract(_map, _tensor)

        # Deprecated
        ## Reshape tensor
        #if _inds:
        #    if _i_inds and _f_inds:
        #        tensor = np.reshape(tensor, (2**len(_i_inds), 2**len(_f_inds)))
        #    else:
        #        tensor = np.reshape(tensor,
        #                            (2**max(len(_i_inds), len(_f_inds)),))

    else:

        # Contract tensor
        tensor = tensor.contract(optimize=optimize, backend=backend)

        if hasattr(tensor, 'inds'):

            # Get input indexes and output indexes
            _i_inds = sort([x for x in tensor.inds if x[-2:] == '_i'],
                           key=lambda x: int(x.split('_')[1]))
            _f_inds = sort([x for x in tensor.inds if x[-2:] == '_f'],
                           key=lambda x: int(x.split('_')[1]))

            # Transpose tensor
            tensor.transpose(*(_i_inds + _f_inds), inplace=True)

            # Deprecated
            ## Reshape tensor
            #if _i_inds and _f_inds:
            #    tensor = np.reshape(tensor, (2**len(_i_inds), 2**len(_f_inds)))
            #else:
            #    tensor = np.reshape(tensor,
            #                        (2**max(len(_i_inds), len(_f_inds)),))

    if kwargs['return_info']:
        return tensor, _sim_info
    else:
        return tensor
Example #27
def _simulate_evolution(circuit: iter[Gate], initial_state: any,
                        final_state: any, optimize: any, backend: any,
                        complex_type: any, verbose: bool, **kwargs):
    """
    Perform simulation of the circuit by using the direct evolution of the quantum state.
    """

    if _detect_mpi:
        warn("Detected MPI but optimize='evolution' does not support MPI.")

    # Initialize info
    _sim_info = {}

    # Convert iterable to circuit
    circuit = Circuit(circuit)

    # Get number of qubits
    qubits = circuit.all_qubits()
    n_qubits = len(qubits)

    # Check if core libraries have been loaded properly
    if any(not x for x in
           [_swap_core, _dot_core, _to_complex_core, _log2_pack_size]):
        warn("Cannot find C++ HybridQ core. "
             "Falling back to optimize='evolution-einsum' instead.")
        optimize = 'einsum'

    # If the system is too small, fallback to einsum
    if optimize == 'hybridq' and n_qubits <= max(10, _log2_pack_size):
        warn("The system is too small to use optimize='evolution-hybridq'. "
             "Falling back to optimize='evolution-einsum'")
        optimize = 'einsum'

    if verbose:
        print(f'# Optimization: {optimize}', file=stderr)

    # Check memory
    if 2**n_qubits > kwargs['max_largest_intermediate']:
        raise MemoryError(
            "Memory for the given number of qubits exceeds the 'max_largest_intermediate'."
        )

    # If final_state is specified, warn user
    if final_state is not None:
        warn(
            f"'final_state' cannot be specified in optimize='{optimize}'. Ignoring 'final_state'."
        )

    # Initial state must be provided
    if initial_state is None:
        raise ValueError(
            "'initial_state' must be specified for optimize='evolution'.")

    # Convert complex_type to np.dtype
    complex_type = np.dtype(complex_type)

    # Print info
    if verbose:
        print(f"Compress circuit (max_n_qubits={kwargs['compress']}): ",
              end='',
              file=stderr)
        _time = time()

    # Compress circuit
    circuit = utils.compress(
        circuit,
        kwargs['compress']['max_n_qubits'] if isinstance(
            kwargs['compress'], dict) else kwargs['compress'],
        verbose=verbose,
        skip_compression=[pr.FunctionalGate],
        **({
            k: v
            for k, v in kwargs['compress'].items() if k != 'max_n_qubits'
        } if isinstance(kwargs['compress'], dict) else {}))

    # Check that FunctionalGate's are not compressed
    assert (all(not isinstance(g, pr.FunctionalGate) if len(x) > 1 else True
                for x in circuit for g in x))

    # Compress everything which is not a FunctionalGate
    circuit = Circuit(g for c in (c if any(
        isinstance(g, pr.FunctionalGate)
        for g in c) else [utils.to_matrix_gate(c, complex_type=complex_type)]
                                  for c in circuit) for g in c)

    # Get state
    initial_state = prepare_state(initial_state,
                                  complex_type=complex_type) if isinstance(
                                      initial_state, str) else initial_state

    if verbose:
        print(f"Done! ({time()-_time:1.2f}s)", file=stderr)

    if optimize == 'hybridq':

        if complex_type not in ['complex64', 'complex128']:
            warn(
                "optimize=evolution-hybridq only support ['complex64', 'complex128']. Using 'complex64'."
            )
            complex_type = np.dtype('complex64')

        # Get float_type
        float_type = np.real(np.array(1, dtype=complex_type)).dtype

        # Get C float_type
        c_float_type = {
            np.dtype('float32'): ctypes.c_float,
            np.dtype('float64'): ctypes.c_double
        }[float_type]

        # Load libraries
        _apply_U = _dot_core[float_type]

        # Get swap core
        _swap = _swap_core[float_type]

        # Get to_complex core
        _to_complex = _to_complex_core[complex_type]

        # Get states
        _psi = aligned.empty(shape=(2, ) + initial_state.shape,
                             dtype=float_type,
                             order='C',
                             alignment=32)
        # Split in real and imaginary part
        _psi_re = _psi[0]
        _psi_im = _psi[1]

        # Check alignment
        assert (_psi_re.ctypes.data % 32 == 0)
        assert (_psi_im.ctypes.data % 32 == 0)

        # Get C-pointers
        _psi_re_ptr = _psi_re.ctypes.data_as(ctypes.POINTER(c_float_type))
        _psi_im_ptr = _psi_im.ctypes.data_as(ctypes.POINTER(c_float_type))

        # Initialize
        np.copyto(_psi_re, np.real(initial_state))
        np.copyto(_psi_im, np.imag(initial_state))

        # Create index maps
        _map = {q: n_qubits - x - 1 for x, q in enumerate(qubits)}
        _inv_map = [q for q, _ in sort(_map.items(), key=lambda x: x[1])]

        # Set largest swap_size
        _max_swap_size = 0

        # Start clock
        _ini_time = time()

        # Apply all gates
        for gate in tqdm(circuit, disable=not verbose):

            # FunctionalGate
            if isinstance(gate, pr.FunctionalGate):
                # Get order
                order = tuple(
                    q
                    for q, _ in sorted(_map.items(), key=lambda x: x[1])[::-1])

                # Apply gate to state
                new_psi, new_order = gate.apply(psi=_psi, order=order)

                # Copy back if needed
                if new_psi is not _psi:
                    # Align if needed
                    _psi = aligned.asarray(new_psi,
                                           order='C',
                                           alignment=32,
                                           dtype=_psi.dtype)

                    # Redefine real and imaginary part
                    _psi_re = _psi[0]
                    _psi_im = _psi[1]

                    # Get C-pointers
                    _psi_re_ptr = _psi_re.ctypes.data_as(
                        ctypes.POINTER(c_float_type))
                    _psi_im_ptr = _psi_im.ctypes.data_as(
                        ctypes.POINTER(c_float_type))

                # This can be eventually fixed ...
                if any(x != y for x, y in zip(order, new_order)):
                    raise RuntimeError("'order' has changed.")

            elif gate.provides(['qubits', 'matrix']):

                # Check if any qubit is within the pack_size
                if any(q in _inv_map[:_log2_pack_size] for q in gate.qubits):

                    #@@@ Alternative way to always use the smallest swap size
                    #@@@
                    #@@@ # Get positions
                    #@@@ _pos = np.fromiter((_map[q] for q in gate.qubits),
                    #@@@                    dtype=int)

                    #@@@ # Get smallest swap size
                    #@@@ _swap_size = 0 if np.all(_pos >= _log2_pack_size) else next(
                    #@@@     k
                    #@@@     for k in range(_log2_pack_size, 2 *
                    #@@@                    max(len(_pos), _log2_pack_size) + 1)
                    #@@@     if sum(_pos < k) <= k - _log2_pack_size)

                    #@@@ # Get new order
                    #@@@ _order = [
                    #@@@     x for x, q in enumerate(_inv_map[:_swap_size])
                    #@@@     if q not in gate.qubits
                    #@@@ ]
                    #@@@ _order += [
                    #@@@     x for x, q in enumerate(_inv_map[:_swap_size])
                    #@@@     if q in gate.qubits
                    #@@@ ]

                    if len(gate.qubits) <= 4:

                        # Get new order
                        _order = [
                            x for x, q in enumerate(_inv_map[:8])
                            if q not in gate.qubits
                        ]
                        _order += [
                            x for x, q in enumerate(_inv_map[:8])
                            if q in gate.qubits
                        ]

                    else:

                        # Get qubit indexes for gate
                        _gate_idxs = [_inv_map.index(q) for q in gate.qubits]

                        # Get new order
                        _order = [
                            x for x in range(n_qubits) if x not in _gate_idxs
                        ][:_log2_pack_size]
                        _order += [x for x in _gate_idxs if x < max(_order)]

                    # Get swap size
                    _swap_size = len(_order)

                    # Update max swap size
                    if _swap_size > _max_swap_size:
                        _max_swap_size = _swap_size

                    # Update maps
                    _inv_map[:_swap_size] = [
                        _inv_map[:_swap_size][x] for x in _order
                    ]
                    _map.update(
                        {q: x
                         for x, q in enumerate(_inv_map[:_swap_size])})

                    # Apply swap
                    _order = np.array(_order, dtype='uint32')
                    _swap(
                        _psi_re_ptr,
                        _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)),
                        n_qubits, len(_order))
                    _swap(
                        _psi_im_ptr,
                        _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)),
                        n_qubits, len(_order))

                # Get positions
                _pos = np.array([_map[q] for q in reversed(gate.qubits)],
                                dtype='uint32')

                # Get matrix
                _U = np.asarray(gate.matrix(), dtype=complex_type, order='C')

                # Apply matrix
                if _apply_U(
                        _psi_re_ptr, _psi_im_ptr,
                        _U.ctypes.data_as(ctypes.POINTER(c_float_type)),
                        _pos.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)),
                        n_qubits, len(_pos)):

                    raise RuntimeError('something went wrong')

            else:
                raise RuntimeError(f"'{gate}' not supported")

        # Check maps are still consistent
        assert (all(_inv_map[_map[q]] == q for q in _map))

        # Swap back to the correct order
        _order = np.array([_inv_map.index(q)
                           for q in reversed(qubits)][:_max_swap_size],
                          dtype='uint32')
        _swap(_psi_re_ptr,
              _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits,
              len(_order))
        _swap(_psi_im_ptr,
              _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits,
              len(_order))

        # Stop clock
        _end_time = time()

        # Copy the results
        if kwargs['return_numpy_array']:
            _complex_psi = np.empty(_psi.shape[1:], dtype=complex_type)
            _to_complex(
                _psi_re_ptr, _psi_im_ptr,
                _complex_psi.ctypes.data_as(ctypes.POINTER(c_float_type)),
                2**n_qubits)
            _psi = _complex_psi

        # Update info
        _sim_info['runtime (s)'] = _end_time - _ini_time

    elif optimize.split('-')[0] == 'einsum':

        optimize = '-'.join(optimize.split('-')[1:])
        if not optimize:
            optimize = 'auto'

        # Split circuits to separate FunctionalGate's
        circuit = utils.compress(
            circuit,
            max_n_qubits=len(qubits),
            skip_compression=[pr.FunctionalGate],
            **({
                k: v
                for k, v in kwargs['compress'].items() if k != 'max_n_qubits'
            } if isinstance(kwargs['compress'], dict) else {}))

        # Check that FunctionalGate's are not compressed
        assert (all(
            not isinstance(g, pr.FunctionalGate) if len(x) > 1 else True
            for x in circuit for g in x))

        # Prepare initial_state
        _psi = initial_state

        # Initialize time
        _ini_time = time()
        for circuit in circuit:

            # Check
            assert (all(not isinstance(g, pr.FunctionalGate) for g in circuit)
                    or len(circuit) == 1)

            # Apply gate if functional
            if len(circuit) == 1 and isinstance(circuit[0], pr.FunctionalGate):

                # Apply gate to state
                _psi, qubits = circuit[0].apply(psi=_psi, order=qubits)

            else:
                # Get gates and corresponding qubits
                _qubits, _gates = zip(
                    *((c.qubits,
                       np.reshape(c.matrix().astype(complex_type), (2, ) *
                                  (2 * len(c.qubits)))) for c in circuit))

                # Initialize map
                _map = {q: get_symbol(x) for x, q in enumerate(qubits)}
                _count = n_qubits
                _path = ''.join((_map[q] for q in qubits))

                # Generate map
                for _qs in _qubits:

                    # Initialize local paths
                    _path_in = _path_out = ''

                    # Add incoming legs
                    for _q in _qs:
                        _path_in += _map[_q]

                    # Add outgoing legs
                    for _q in _qs:
                        _map[_q] = get_symbol(_count)
                        _count += 1
                        _path_out += _map[_q]

                    # Update path
                    _path = _path_out + _path_in + ',' + _path

                # Make sure that the qubit order is preserved
                _path += '->' + ''.join([_map[q] for q in qubits])

                # Contract
                _psi = contract(_path,
                                *reversed(_gates),
                                _psi,
                                backend=backend,
                                optimize=optimize)

                # Block JAX until result is ready (for a more precise runtime)
                if backend == 'jax' and kwargs['block_until_ready']:
                    _psi.block_until_ready()

        # Stop time
        _end_time = time()

        # Update info
        _sim_info['runtime (s)'] = _end_time - _ini_time

    else:

        raise ValueError(f"optimize='{optimize}' not implemented.")

    if verbose:
        print(f'# Runtime (s): {_sim_info["runtime (s)"]:1.2f}', file=stderr)

    # Return state
    if kwargs['return_info']:
        return _psi, _sim_info
    else:
        return _psi
def symbol_stream():
    # Yield an endless stream of unique einsum symbols
    import itertools
    for i in itertools.count():
        yield get_symbol(i)
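
# A minimal, self-contained sketch (illustrative, not part of the snippets
# above) of the leg-relabeling scheme used in the 'einsum' branch: each gate
# gets fresh output symbols for the qubits it acts on, and a single einsum
# call contracts all gates against the state. The gates and qubit layout
# below are assumptions made for the example.
import numpy as np
from opt_einsum import contract, get_symbol

n_qubits = 3
psi = np.zeros((2,) * n_qubits, dtype=complex)
psi[(0,) * n_qubits] = 1  # |000>

H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
CNOT = np.reshape(np.eye(4)[[0, 1, 3, 2]], (2,) * 4)

qubits = list(range(n_qubits))
_map = {q: get_symbol(q) for q in qubits}
_count = n_qubits
_path = ''.join(_map[q] for q in qubits)

# Apply H to qubit 0, then CNOT to qubits (0, 1)
for _qs in [(0,), (0, 1)]:
    _path_in = ''.join(_map[q] for q in _qs)
    _path_out = ''
    for q in _qs:
        _map[q] = get_symbol(_count)
        _count += 1
        _path_out += _map[q]
    _path = _path_out + _path_in + ',' + _path

# Preserve the qubit order on the output
_path += '->' + ''.join(_map[q] for q in qubits)

# Operands are passed in reverse order of application, matching the path
psi = contract(_path, CNOT, H, psi)
assert np.isclose(psi[1, 1, 0], 1 / np.sqrt(2))  # Bell pair on qubits 0, 1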
Example #29
                              use_mpi=False,
                              verbose=True)
    _states_tn_2 = simulation.simulate(_states_tn_2_tensor,
                                       optimize=(_states_tn_2_info,
                                                 _states_tn_2_opt),
                                       max_largest_intermediate=2**10,
                                       max_n_slices=2**11,
                                       verbose=True)

    # Broadcast
    _states_tn_1, _states_tn_2 = _mpi_comm.bcast((_states_tn_1, _states_tn_2),
                                                 root=0)

    # Compare with exact
    _xpos = [x for x, s in enumerate(final_state) if s == '.']
    _map = ''.join([get_symbol(x) for x in range(n_qubits)])
    _map += '->'
    _map += ''.join(
        ['' if x in _xpos else get_symbol(x) for x in range(n_qubits)])
    _map += ''.join([get_symbol(x) for x in _xpos])
    _states_expected_tn = contract(
        _map, np.reshape(_states_evolution, [2] * n_qubits))
    _states_expected_tn = _states_expected_tn[tuple(
        map(int,
            final_state.replace('.', '').zfill(n_qubits - len(_xpos))))]

    assert (np.isclose(np.linalg.norm(_states_using_matrix.flatten()), 1))
    assert (np.isclose(np.linalg.norm(_states_evolution.flatten()), 1))
    assert (_states_tn_1.shape == (2,) * (3 + final_state.count('.')))
    assert (np.allclose(_states_using_matrix, _states_evolution, atol=1e-5))
    assert (np.allclose(_states_using_matrix,
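
# A minimal, self-contained sketch (illustrative values; assumes only numpy
# and opt_einsum) of the reordering trick used above: the einsum map keeps
# every axis but moves the open ('.') qubit axes to the end, so the fixed
# qubits can then be selected by plain indexing.
import numpy as np
from opt_einsum import contract, get_symbol

n_qubits, final_state = 3, '0.1'  # qubits 0 and 2 fixed, qubit 1 open
_xpos = [x for x, s in enumerate(final_state) if s == '.']

_map = ''.join(get_symbol(x) for x in range(n_qubits))
_map += '->'
_map += ''.join('' if x in _xpos else get_symbol(x) for x in range(n_qubits))
_map += ''.join(get_symbol(x) for x in _xpos)
assert _map == 'abc->acb'

psi = np.random.rand(*(2,) * n_qubits)
reordered = contract(_map, psi)

# Select the fixed qubits ('01'), leaving one open axis of amplitudes
amps = reordered[tuple(map(int, final_state.replace('.', '')))]
assert amps.shape == (2,) * len(_xpos)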
Example #30
def _simulate_tn_mpi(circuit: Circuit, initial_state: any, final_state: any,
                     optimize: any, backend: any, complex_type: any,
                     tensor_only: bool, verbose: bool, **kwargs):
    import quimb.tensor as tn
    import cotengra as ctg

    # Get MPI
    _mpi_comm = MPI.COMM_WORLD
    _mpi_size = _mpi_comm.Get_size()
    _mpi_rank = _mpi_comm.Get_rank()

    # Set default parameters
    kwargs.setdefault('compress', 2)
    kwargs.setdefault('simplify_tn', 'RC')
    kwargs.setdefault('max_iterations', 1)
    kwargs.setdefault('methods', ['kahypar', 'greedy'])
    kwargs.setdefault('max_time', 120)
    kwargs.setdefault('max_repeats', 16)
    kwargs.setdefault('minimize', 'combo')
    kwargs.setdefault('target_largest_intermediate', 0)
    kwargs.setdefault('max_largest_intermediate', 2**26)
    kwargs.setdefault('temperatures', [1.0, 0.1, 0.01])
    kwargs.setdefault('parallel', None)
    kwargs.setdefault('cotengra', {})
    kwargs.setdefault('max_n_slices', None)
    kwargs.setdefault('return_info', False)

    # Get random leaves_prefix
    leaves_prefix = ''.join(
        np.random.choice(list('abcdefghijklmnopqrstuvwxyz'), size=20))
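    # (the random prefix makes the tensor-network index names generated
    # below, e.g. f'{leaves_prefix}_..._i', unlikely to collide)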

    # Initialize info
    _sim_info = {}

    # Alias for tn
    if optimize == 'tn':
        optimize = 'cotengra'

    if isinstance(circuit, Circuit):

        if not kwargs['parallel']:
            kwargs['parallel'] = 1
        else:
            # If the number of threads is not provided, use half of the
            # available CPUs
            if kwargs['parallel'] is True:
                kwargs['parallel'] = cpu_count() // 2

        if optimize is not None and kwargs['parallel'] and kwargs[
                'max_iterations'] == 1:
            warn("Parallelization for MPI works for multiple iterations only. "
                 "For a better performance, use: 'max_iterations' > 1")

        # Get number of qubits
        qubits = circuit.all_qubits()
        n_qubits = len(qubits)

        # If initial/final state is None, set to all .'s
        initial_state = '.' * n_qubits if initial_state is None else initial_state
        final_state = '.' * n_qubits if final_state is None else final_state

        # Initial and final states must be valid strings
        for state, sname in [(initial_state, 'initial_state'),
                             (final_state, 'final_state')]:
            # Get alphabet
            from string import ascii_letters

            # Check if string
            if not isinstance(state, str):
                raise ValueError(f"'{sname}' must be a valid string.")

            # Warn about deprecated usage
            if any(x in 'xX' for x in state):
                from warnings import warn

                # Define a new DeprecationWarning (so that the warning is
                # always printed)
                class DeprecationWarning(Warning):
                    pass

                # Warn the user that '.' is used to represent open qubits
                warn(
                    "Since '0.6.3', letters in the alphabet are used to "
                    "trace selected qubits (including 'x' and 'X'). "
                    "Instead, '.' is used to represent an open qubit.",
                    DeprecationWarning)

            # Check only valid symbols are present
            if set(state).difference('01+-.' + ascii_letters):
                raise ValueError(f"'{sname}' contains invalid symbols.")

            # Check number of qubits
            if len(state) != n_qubits:
                raise ValueError(f"'{sname}' has the wrong number of qubits "
                                 f"(expected {n_qubits}, got {len(state)})")

        # Check memory
        if 2**(initial_state.count('.') +
               final_state.count('.')) > kwargs['max_largest_intermediate']:
            raise MemoryError("Memory for the given number of open qubits "
                              "exceeds the 'max_largest_intermediate'.")

        # Compress circuit
        if kwargs['compress']:
            if verbose:
                print(
                    f"Compress circuit (max_n_qubits={kwargs['compress']}): ",
                    end='',
                    file=stderr)
                _time = time()

            circuit = utils.compress(
                circuit,
                kwargs['compress']['max_n_qubits'] if isinstance(
                    kwargs['compress'], dict) else kwargs['compress'],
                verbose=verbose,
                **({
                    k: v
                    for k, v in kwargs['compress'].items()
                    if k != 'max_n_qubits'
                } if isinstance(kwargs['compress'], dict) else {}))

            circuit = Circuit(
                utils.to_matrix_gate(c, complex_type=complex_type)
                for c in circuit)
            if verbose:
                print(f"Done! ({time()-_time:1.2f}s)", file=stderr)

        # Get tensor network representation of circuit
        tensor, tn_qubits_map = utils.to_tn(circuit,
                                            return_qubits_map=True,
                                            leaves_prefix=leaves_prefix)

        # Define basic MPS
        _mps = {
            '0': np.array([1, 0]),
            '1': np.array([0, 1]),
            '+': np.array([1, 1]) / np.sqrt(2),
            '-': np.array([1, -1]) / np.sqrt(2)
        }

        # Attach initial/final state
        for state, ext in [(initial_state, 'i'), (final_state, 'f')]:
            for s, q in ((s, q) for s, q in zip(state, qubits) if s in _mps):
                inds = [f'{leaves_prefix}_{tn_qubits_map[q]}_{ext}']
                tensor &= tn.Tensor(_mps[s], inds=inds, tags=inds)

        # For each unique letter, apply trace
        for x in set(initial_state + final_state).difference(''.join(_mps) +
                                                             '.'):
            # Get indexes
            inds = [
                f'{leaves_prefix}_{tn_qubits_map[q]}_i'
                for s, q in zip(initial_state, qubits) if s == x
            ]
            inds += [
                f'{leaves_prefix}_{tn_qubits_map[q]}_f'
                for s, q in zip(final_state, qubits) if s == x
            ]

            # Apply trace
            tensor &= tn.Tensor(np.reshape([1] + [0] * (2**len(inds) - 2) +
                                           [1], (2, ) * len(inds)),
                                inds=inds)
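            # (the reshaped vector is 1 at (0, ..., 0) and (1, ..., 1) and 0
            # elsewhere, i.e. a generalized Kronecker delta that ties all the
            # listed legs to the same value)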

        # Simplify if requested
        if kwargs['simplify_tn']:
            tensor.full_simplify_(kwargs['simplify_tn']).astype_(complex_type)
        else:
            # Otherwise, just convert to the given complex_type
            tensor.astype_(complex_type)

        # Get contraction from heuristic
        if optimize == 'cotengra' and kwargs['max_iterations'] > 0:

            # Set cotengra parameters
            def cotengra_params():
                # Get HyperOptimizer
                q = ctg.HyperOptimizer(methods=kwargs['methods'],
                                       max_time=kwargs['max_time'],
                                       max_repeats=kwargs['max_repeats'],
                                       minimize=kwargs['minimize'],
                                       progbar=False,
                                       parallel=False,
                                       **kwargs['cotengra'])

                # For some optlib, HyperOptimizer._retrieve_params is not
                # picklable. Fix the problem by wrapping it by hand.
                q._retrieve_params = __FunctionWrap(q._retrieve_params)

                # Return HyperOptimizer
                return q

            # Get target size
            tli = kwargs['target_largest_intermediate']

            with Pool(kwargs['parallel']) as pool:

                # Submit jobs
                _opts = [
                    cotengra_params() for _ in range(kwargs['max_iterations'])
                ]
                _map = [
                    pool.apply_async(tensor.contract, (all, ),
                                     dict(optimize=_opt, get='path-info'))
                    for _opt in _opts
                ]

                with tqdm(total=len(_map),
                          disable=not verbose,
                          desc='Collecting contractions') as pbar:

                    _old_completed = 0
                    while True:

                        # Count number of completed
                        _completed = 0
                        for _w in _map:
                            _completed += _w.ready()
                            if _w.ready() and not _w.successful():
                                _w.get()

                        # Update pbar
                        pbar.update(_completed - _old_completed)
                        _old_completed = _completed

                        if _completed == len(_map):
                            break

                        # Wait
                        sleep(1)

                # Collect results
                _infos = [_w.get() for _w in _map]

            if kwargs['minimize'] == 'size':
                opt, info = sort(
                    zip(_opts, _infos),
                    key=lambda w:
                    (w[1].largest_intermediate, w[0].best['flops']))[0]
            else:
                opt, info = sort(
                    zip(_opts, _infos),
                    key=lambda w:
                    (w[0].best['flops'], w[1].largest_intermediate))[0]

        if optimize == 'cotengra':

            # Gather best contractions
            _cost = _mpi_comm.gather(
                (info.largest_intermediate, info.opt_cost, _mpi_rank), root=0)
            if _mpi_rank == 0:
                if kwargs['minimize'] == 'size':
                    _best_rank = sort(_cost, key=lambda x: (x[0], x[1]))[0][-1]
                else:
                    _best_rank = sort(_cost, key=lambda x: (x[1], x[0]))[0][-1]
            else:
                _best_rank = None
            _best_rank = _mpi_comm.bcast(_best_rank, root=0)

            if hasattr(opt, '_pool'):
                del opt._pool

            # Distribute opt/info
            tensor, info, opt = _mpi_comm.bcast((tensor, info, opt),
                                                root=_best_rank)

        # Just return tensor if required
        if tensor_only:
            if optimize == 'cotengra' and kwargs['max_iterations'] > 0:
                return tensor, (info, opt)
            else:
                return tensor

    else:

        # Set tensor
        tensor = circuit

        if len(optimize) == 2 and isinstance(
                optimize[0], PathInfo) and isinstance(
                    optimize[1], ctg.hyper.HyperOptimizer):

            # Get info and opt from optimize
            info, opt = optimize

            # Set optimization
            optimize = 'cotengra'

    # Print some info
    if verbose and _mpi_rank == 0:
        print(
            f'Largest Intermediate: 2^{np.log2(float(info.largest_intermediate)):1.2f}',
            file=stderr)
        print(
            f'Max Largest Intermediate: 2^{np.log2(float(kwargs["max_largest_intermediate"])):1.2f}',
            file=stderr)
        print(f'Flops: 2^{np.log2(float(info.opt_cost)):1.2f}', file=stderr)

    if optimize == 'cotengra':

        if _mpi_rank == 0:

            # Get indexes
            _inds = tensor.outer_inds()

            # Get input indexes and output indexes
            _i_inds = sort([x for x in _inds if x[-2:] == '_i'],
                           key=lambda x: int(x.split('_')[1]))
            _f_inds = sort([x for x in _inds if x[-2:] == '_f'],
                           key=lambda x: int(x.split('_')[1]))

            # Get order
            _inds = [_inds.index(x) for x in _i_inds + _f_inds]

            # Get slice finder
            sf = ctg.SliceFinder(
                info,
                target_size=kwargs['max_largest_intermediate'],
                allow_outer=False)

            # Find slices
            with tqdm(kwargs['temperatures'], disable=not verbose,
                      leave=False) as pbar:
                for _temp in pbar:
                    pbar.set_description(f'Find slices (T={_temp})')
                    ix_sl, cost_sl = sf.search(temperature=_temp)

            # Get slice contractor
            sc = sf.SlicedContractor([t.data for t in tensor])

            # Make sure that no open qubits are sliced
            assert (not any(ix in sc.sliced for ix in sc.output))

            # Print some info
            if verbose:
                print(
                    f'Number of slices: 2^{np.log2(float(cost_sl.nslices)):1.2f}',
                    file=stderr)
                print(
                    f'Flops+Cuts: 2^{np.log2(float(cost_sl.total_flops)):1.2f}',
                    file=stderr)

            # Update info
            _sim_info.update({
                'flops': info.opt_cost,
                'largest_intermediate': info.largest_intermediate,
                'n_slices': cost_sl.nslices,
                'total_flops': cost_sl.total_flops
            })

            # Get slices (slices[r]:slices[r+1] is the half-open range of
            # slice indices contracted by rank r; trailing None's mark ranks
            # with no assigned slices)
            if cost_sl.nslices < _mpi_size:
                slices = list(range(cost_sl.nslices + 1)) + [None] * (
                    _mpi_size - cost_sl.nslices)
            else:
                slices = [
                    cost_sl.nslices / _mpi_size * i for i in range(_mpi_size)
                ] + [cost_sl.nslices]

            # Slice boundaries must be integers and strictly increasing
            if not np.all([int(x) == x for x in slices
                           if x is not None]) or not np.all([
                               slices[i] < slices[i + 1]
                               for i in range(_mpi_size)
                               if slices[i] is not None and
                               slices[i + 1] is not None
                           ]):
                raise RuntimeError(
                    'Inconsistent slice distribution across MPI ranks.')

            # Convert all to integers
            slices = [int(x) if x is not None else None for x in slices]

        else:

            sc = slices = None

        # Distribute slicer and slices
        sc, slices = _mpi_comm.bcast((sc, slices), root=0)

        _n_slices = max(x for x in slices if x is not None)
        if kwargs['max_n_slices'] and _n_slices > kwargs['max_n_slices']:
            raise RuntimeError(
                f'Too many slices ({_n_slices} > {kwargs["max_n_slices"]})')

        # Contract slices
        _tensor = None
        if slices[_mpi_rank] is not None and slices[_mpi_rank + 1] is not None:
            for i in tqdm(range(slices[_mpi_rank], slices[_mpi_rank + 1]),
                          desc='Contracting slices',
                          disable=not verbose,
                          leave=False):
                if _tensor is None:
                    _tensor = np.copy(sc.contract_slice(i, backend=backend))
                else:
                    _tensor += sc.contract_slice(i, backend=backend)

        # Gather tensors
        if _mpi_rank != 0:
            _mpi_comm.send(_tensor, dest=0, tag=11)
        else:
            for i in tqdm(range(1, _mpi_size),
                          desc='Collecting tensors',
                          disable=not verbose):
                _p_tensor = _mpi_comm.recv(source=i, tag=11)
                if _p_tensor is not None:
                    _tensor += _p_tensor

        if _mpi_rank == 0:

            # Create map
            _map = ''.join([get_symbol(x) for x in range(len(_inds))])
            _map += '->'
            _map += ''.join([get_symbol(x) for x in _inds])

            # Reorder tensor
            tensor = contract(_map, _tensor)

            # Deprecated
            ## Reshape tensor
            #if _inds:
            #    if _i_inds and _f_inds:
            #        tensor = np.reshape(tensor,
            #                            (2**len(_i_inds), 2**len(_f_inds)))
            #    else:
            #        tensor = np.reshape(tensor,
            #                            (2**max(len(_i_inds), len(_f_inds)),))

        else:

            tensor = None

    else:

        if _mpi_rank == 0:

            # Contract tensor
            tensor = tensor.contract(optimize=optimize, backend=backend)

            if hasattr(tensor, 'inds'):

                # Get input indexes and output indexes
                _i_inds = sort([x for x in tensor.inds if x[-2:] == '_i'],
                               key=lambda x: int(x.split('_')[1]))
                _f_inds = sort([x for x in tensor.inds if x[-2:] == '_f'],
                               key=lambda x: int(x.split('_')[1]))

                # Transpose tensor
                tensor.transpose(*(_i_inds + _f_inds), inplace=True)

                # Deprecated
                ## Reshape tensor
                #if _i_inds and _f_inds:
                #    tensor = np.reshape(tensor,
                #                        (2**len(_i_inds), 2**len(_f_inds)))
                #else:
                #    tensor = np.reshape(tensor,
                #                        (2**max(len(_i_inds), len(_f_inds)),))

        else:

            tensor = None

    if kwargs['return_info']:
        return tensor, _sim_info
    else:
        return tensor
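
# A minimal, self-contained sketch (assuming only numpy and opt_einsum) of
# the final reordering step above: an einsum whose output subscripts list
# the input axes in the desired order is just an axis permutation.
import numpy as np
from opt_einsum import contract, get_symbol

tensor = np.random.rand(2, 3, 4)
order = [2, 0, 1]  # desired order of the input axes

_map = ''.join(get_symbol(x) for x in range(tensor.ndim))
_map += '->'
_map += ''.join(get_symbol(x) for x in order)
assert _map == 'abc->cab'

assert np.array_equal(contract(_map, tensor), np.transpose(tensor, order))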
Example #31
def rand_equation(n,
                  reg,
                  n_out=0,
                  n_hyper_in=0,
                  n_hyper_out=0,
                  d_min=2,
                  d_max=3,
                  seed=None):
    """A more advanced version of ``opt_einsum.helpers.rand_equation`` that
    can also generate both inner and outer hyper-edges. Mostly useful for
    generating test instances covering all edge cases.

    Parameters
    ----------
    n : int
        The number of tensors.
    reg : int
        The average number of indices per tensor if no hyper-edges, i.e.
        total number of inds ``= n * reg // 2``.
    n_out : int, optional
        The number of output indices.
    n_hyper_in : int, optional
        The number of inner hyper-indices.
    n_hyper_out : int, optional
        The number of outer hyper-indices.
    d_min : int, optional
        The minimum dimension size.
    d_max : int, optional
        The maximum dimension size.
    seed : None or int, optional
        Seed for ``np.random`` for repeatability.

    Returns
    -------
    inputs : list[list[str]]
    output : list[str]
    shapes : list[tuple[int]]
    size_dict : dict[str, int]
    """
    import numpy as np
    import opt_einsum as oe

    if seed is not None:
        np.random.seed(seed)

    num_inds = max((n * reg) // 2, n_hyper_out + n_hyper_in + n_out)
    size_dict = {
        oe.get_symbol(i): np.random.randint(d_min, d_max + 1)
        for i in range(num_inds)
    }

    inds = iter(size_dict)
    inputs = [[] for _ in range(n)]
    output = []

    for _ in range(n_hyper_out):
        ind = next(inds)
        output.append(ind)
        s = np.random.randint(3, n + 1)
        where = np.random.choice(np.arange(n), size=s, replace=False)
        for i in where:
            inputs[i].append(ind)

    for _ in range(n_hyper_in):
        ind = next(inds)
        s = np.random.randint(3, n + 1)
        where = np.random.choice(np.arange(n), size=s, replace=False)
        for i in where:
            inputs[i].append(ind)

    for _ in range(n_out):
        ind = next(inds)
        output.append(ind)
        where = np.random.choice(np.arange(n), size=2, replace=False)
        for i in where:
            inputs[i].append(ind)

    for ind in inds:
        where = np.random.choice(np.arange(n), size=2, replace=False)
        for i in where:
            inputs[i].append(ind)

    shapes = [tuple(size_dict[ix] for ix in term) for term in inputs]

    output = list(np.random.permutation(output))

    return inputs, output, shapes, size_dict
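
# A quick usage sketch (illustrative parameters): build the einsum equation
# from the generated terms, materialize dummy operands, and ask opt_einsum
# for a contraction path. Hyper-edges appear in the equation as indices
# shared by more than two operands.
import numpy as np
import opt_einsum as oe

inputs, output, shapes, size_dict = rand_equation(
    n=6, reg=3, n_out=1, n_hyper_in=1, n_hyper_out=1, seed=42)

eq = ','.join(map(''.join, inputs)) + '->' + ''.join(output)
arrays = [np.ones(shape) for shape in shapes]

path, info = oe.contract_path(eq, *arrays)
print(info.largest_intermediate)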