Example #1
def test_graph_breadth_first_trivial_graph():
    csgraph = np.array([[0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0]])

    for directed in [True, False]:
        bfirst_test = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
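These test fragments rely on imports from their test module; a minimal preamble that makes them runnable on their own (a sketch, assuming a current NumPy/SciPy) is:

import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse.csgraph import (breadth_first_tree, csgraph_from_dense,
                                  csgraph_to_dense)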
Example #2
def get_breadth_first_tree(csr_adj_matrix, node_index):
    # build a networkx view of the sparse adjacency matrix
    # (networkx < 3.0 API; 3.x renamed this to from_scipy_sparse_array)
    original_adj_graph = nx.from_scipy_sparse_matrix(csr_adj_matrix)
    # rank nodes by degree, highest first
    degree_list = sorted([(degree, node)
                          for node, degree in original_adj_graph.degree()],
                         reverse=True)
    # the node_index-th highest-degree node becomes the BFS root
    start_node = degree_list[node_index][1]
    breadth_tree = csgraph.breadth_first_tree(
        csr_adj_matrix, start_node)  # makes a tree from breadth-first search
    return breadth_tree
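A hedged usage sketch (the star graph below and the networkx < 3.0 requirement are assumptions, not part of the original project):

import networkx as nx
import numpy as np
from scipy.sparse import csr_matrix, csgraph

adj = csr_matrix(np.array([[0, 1, 1, 1],
                           [1, 0, 0, 0],
                           [1, 0, 0, 0],
                           [1, 0, 0, 0]]))
tree = get_breadth_first_tree(adj, 0)  # node_index=0 picks the highest-degree node
print(tree.toarray().astype(int))      # star tree rooted at node 0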
Example #3
def test_graph_breadth_first():
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")
    csgraph = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0],
                        [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]])
    csgraph = csgraph_from_dense(csgraph, null_value=0)

    bfirst = np.array([[0, 1, 2, 0, 0], [0, 0, 0, 0, 3], [0, 0, 0, 7, 0],
                       [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])

    for directed in [True, False]:
        bfirst_test = breadth_first_tree(csgraph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(bfirst_test), bfirst)
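For reference, BFS from node 0 first reaches nodes 1 and 2, then node 4 through 1 and node 3 through 2, so the expected tree keeps edges (0,1), (0,2), (1,4) and (2,3) and drops the cycle-closing edge (3,4). Because the matrix is symmetric, directed and undirected traversals agree, which is what the loop checks; a quick standalone confirmation (a sketch, assuming SciPy is installed):

import numpy as np
from scipy.sparse.csgraph import breadth_first_tree

dense = np.array([[0, 1, 2, 0, 0], [1, 0, 0, 0, 3], [2, 0, 0, 7, 0],
                  [0, 0, 7, 0, 1], [0, 3, 0, 1, 0]])
print(breadth_first_tree(dense, 0, directed=False).toarray().astype(int))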
Example #4
    # binarize the weighted matrix M into an adjacency matrix
    adj_mat = np.zeros(M.shape)
    adj_mat[np.where(M > 0)] = 1
    dims = list(range(adj_mat.shape[0]))
    perms = set()

    # collect num_perms distinct random node permutations, stored as bytes so
    # they are hashable (tobytes replaces the removed tostring; the dtype is
    # pinned so the round-trip through frombuffer is exact)
    while len(perms) < num_perms:
        perms.add(np.random.permutation(dims).astype(np.int64).tobytes())

    bfts = []
    for perm in perms:
        perm = np.frombuffer(perm, dtype=np.int64)
        # permute rows and columns consistently
        p_adj_mat = adj_mat[list(perm)][:, list(perm)]
        # cover every connected component with a breadth-first tree
        bft_mat = np.zeros(p_adj_mat.shape)
        for idx in dims:
            if np.sum(bft_mat[:, idx]) == 0:
                bft_mat += breadth_first_tree(p_adj_mat, idx, directed=False).toarray()
        bfts.append((bft_mat, perm))

    n = [0]  # n: node queue for the traversal below
    q = [0]  # q: nodes processed so far

    for bft in bfts:
        bft_mat = bft[0]
        bft_perm = bft[1]
        while len(n) > 0:
            v = n.pop(0)
            children = np.nonzero(bft_mat.astype(int).T[:, v])[0]

            if not children.tolist() and len(q) < bft_mat.shape[0] and not n:
                q_set = set(q)
                children = [[idx for idx in range(bft_mat.shape[0]) if idx not in q_set][0]]
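The fragment above is truncated and assumes a weighted matrix M and a permutation budget num_perms defined earlier; a hypothetical preamble (all names and values here are assumptions, not from the original) would be:

import numpy as np
from scipy.sparse.csgraph import breadth_first_tree

M = np.array([[0., 2., 0.],
              [2., 0., 1.],
              [0., 1., 0.]])  # example weighted adjacency matrix
num_perms = 2                 # how many random node permutations to draw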
Example #5
def _run_bfswpf(ppci, options, **kwargs):
    """
    SPARSE version of distribution power flow solution according to [1]
    :References:
    [1] Jen-Hao Teng, "A Direct Approach for Distribution System Load Flow Solutions",
    IEEE Transactions on Power Delivery, vol. 18, no. 3, pp. 882-887, July 2003.

    :param ppci: matpower-style case data
    :param options: pf options
    :return: results (pypower style), success (flag about PF convergence)
    """
    time_start = time()  # starting pf calculation timing

    baseMVA, bus, gen, branch, ref, pv, pq, \
    on, gbus, V0 = _get_pf_variables_from_ppci(ppci)

    enforce_q_lims, tolerance_kva, max_iteration, calculate_voltage_angles, numba = _get_options(
        options)

    numba, makeYbus = _import_numba_extensions_if_flag_is_true(numba)

    nobus = bus.shape[0]
    nobranch = branch.shape[0]

    # generate Sbus
    Sbus = makeSbus(baseMVA, bus, gen)
    # generate results for original bus ordering
    # Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)
    ppci, Ybus, Yf, Yt = _get_Y_bus(ppci, options, makeYbus, baseMVA, bus,
                                    branch)

    # creating network graph from list of branches
    bus_from = branch[:, F_BUS].real.astype(int)
    bus_to = branch[:, T_BUS].real.astype(int)
    G = csr_matrix((np.ones(nobranch), (bus_from, bus_to)),
                   shape=(nobus, nobus))
    # create spanning trees using breadth-first-search
    # TODO add efficiency warning if a network is heavy-meshed
    G_trees = []
    for refbus in ref:
        G_trees.append(csgraph.breadth_first_tree(G, refbus, directed=False))

        # breadth-first-search bus ordering and generation of the Direct Load Flow matrix DLF = BCBV * BIBC
        ppci, DLF, buses_ordered_bfs_nets = _get_bibc_bcbv(
            ppci, options, bus, branch, G)

    # if there are trafos with phase-shift calculate Ybus without phase-shift for bfswpf
    any_trafo_shift = (branch[:, SHIFT] != 0).any()
    if any_trafo_shift:
        branch_noshift = branch.copy()
        branch_noshift[:, SHIFT] = 0
        Ybus_noshift, Yf_noshift, _ = makeYbus(baseMVA, bus, branch_noshift)
    else:
        Ybus_noshift = Ybus.copy()

    # get current injections for constant-current loads
    Ibus = _get_ibus(ppci)

    # #-----  run the power flow  -----
    V_final, success = _bfswpf(DLF, bus, gen, branch, baseMVA, Ybus_noshift,
                               Sbus, Ibus, V0, ref, pv, pq,
                               buses_ordered_bfs_nets, enforce_q_lims,
                               tolerance_kva, max_iteration, **kwargs)

    # if phase-shifting trafos are present adjust final state vector angles accordingly
    if calculate_voltage_angles and any_trafo_shift:
        brch_shift_mask = branch[:, SHIFT] != 0
        trafos_shift = dict(
            list(
                zip(
                    list(
                        zip(branch[brch_shift_mask, F_BUS].real.astype(int),
                            branch[brch_shift_mask, T_BUS].real.astype(int))),
                    branch[brch_shift_mask, SHIFT].real)))
        for trafo_ind, shift_degree in iteritems(trafos_shift):
            neti = 0
            # if multiple reference nodes, find in which network trafo is located
            if len(ref) > 0:
                for refbusi in range(len(ref)):
                    if trafo_ind[0] in buses_ordered_bfs_nets[refbusi]:
                        neti = refbusi
                        break
            G_tree = G_trees[neti]
            buses_ordered_bfs = buses_ordered_bfs_nets[neti]
            if (np.argwhere(buses_ordered_bfs == trafo_ind[0]) <
                    np.argwhere(buses_ordered_bfs == trafo_ind[1])):
                lv_bus = trafo_ind[1]
                shift_degree *= -1
            else:
                lv_bus = trafo_ind[0]

            buses_shifted_from_root = csgraph.breadth_first_order(
                G_tree, lv_bus, directed=True, return_predecessors=False)
            V_final[buses_shifted_from_root] *= np.exp(1j * np.pi / 180 *
                                                       shift_degree)

    # #----- output results to ppc ------
    ppci["et"] = time() - time_start  # pf time end

    bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V_final,
                              ref)
    # bus, gen, branch = pfsoln_bfsw(baseMVA, bus, gen, branch, V_final, ref, pv, pq, BIBC, ysh_f,ysh_t,Iinj, Sbus)

    ppci["success"] = success

    ppci["bus"], ppci["gen"], ppci["branch"] = bus, gen, branch

    return ppci, success
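The graph-construction pattern used above, a COO-style csr_matrix over the branch from/to bus indices followed by one BFS spanning tree per reference bus, also works in isolation; a minimal sketch for an assumed 4-bus feeder with branches (0,1), (1,2), (1,3):

import numpy as np
from scipy.sparse import csr_matrix, csgraph

bus_from = np.array([0, 1, 1])
bus_to = np.array([1, 2, 3])
nobus, nobranch = 4, 3
G = csr_matrix((np.ones(nobranch), (bus_from, bus_to)), shape=(nobus, nobus))
tree = csgraph.breadth_first_tree(G, 0, directed=False)
print(tree.toarray().astype(int))  # spanning tree rooted at the reference bus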
Example #6
def _make_bibc_bcbv(bus, branch, graph):
    """
    performs depth-first-search bus ordering and creates Direct Load Flow (DLF) matrix
    which establishes direct relation between bus current injections and voltage drops from each bus to the root bus

    :param ppc: matpower-type case data
    :return: DLF matrix DLF = BIBC * BCBV where
                    BIBC - Bus Injection to Branch-Current
                    BCBV - Branch-Current to Bus-Voltage
            ppc with bfs ordering
            original bus names bfs ordered (used to convert voltage array back to normal)
    """

    nobus = bus.shape[0]
    nobranch = branch.shape[0]

    # reference bus is assumed as root bus for a radial network
    refs = bus[bus[:, BUS_TYPE] == 3, BUS_I]
    norefs = len(refs)

    G = graph.copy()  # network graph

    # dictionary with impedance values keyed by branch tuple (frombus, tobus)
    # TODO use list or array, not both
    branches_lst = list(
        zip(branch[:, F_BUS].real.astype(int), branch[:,
                                                      T_BUS].real.astype(int)))
    branches_arr = branch[:, F_BUS:T_BUS + 1].real.astype(int)
    branches_ind_dict = dict(
        zip(zip(branches_arr[:, 0], branches_arr[:, 1]), range(0, nobranch)))
    branches_ind_dict.update(
        dict(
            zip(zip(branches_arr[:, 1], branches_arr[:, 0]),
                range(0, nobranch))))

    tap = branch[:, TAP]  # * np.exp(1j * np.pi / 180 * branch[:, SHIFT])
    z_ser = (branch[:, BR_R].real +
             1j * branch[:, BR_X].real) * tap  # series impedance
    z_brch_dict = dict(zip(branches_lst, z_ser))

    # initialization of lists for building sparse BIBC and BCBV matrices
    rowi_BIBC = []
    coli_BIBC = []
    data_BIBC = []
    data_BCBV = []

    buses_ordered_bfs_nets = []
    for ref in refs:
        # ordering buses according to breadth-first-search (bfs)
        buses_ordered_bfs, predecs_bfs = csgraph.breadth_first_order(
            G, ref, directed=False, return_predecessors=True)
        buses_ordered_bfs_nets.append(buses_ordered_bfs)
        branches_ordered_bfs = list(
            zip(predecs_bfs[buses_ordered_bfs[1:]], buses_ordered_bfs[1:]))
        G_tree = csgraph.breadth_first_tree(G, ref, directed=False)

        # if multiple networks get subnetwork branches
        if norefs > 1:
            branches_sub_mask = (
                np.in1d(branches_arr[:, 0], buses_ordered_bfs)
                & np.in1d(branches_arr[:, 1], buses_ordered_bfs))
            branches = np.sort(branches_arr[branches_sub_mask, :], axis=1)
        else:
            branches = np.sort(branches_arr, axis=1)

        # identify loops if graph is not a tree
        branches_loops = []
        if G_tree.nnz < branches.shape[0]:
            G_tree_nnzs = G_tree.nonzero()
            branches_tree = np.sort(np.array([G_tree_nnzs[0],
                                              G_tree_nnzs[1]]).T,
                                    axis=1)
            branches_loops = (
                set(zip(branches[:, 0], branches[:, 1])) -
                set(zip(branches_tree[:, 0], branches_tree[:, 1])))

        # #------ building BIBC and BCBV matrices ------
        # branches in trees
        brchi = 0
        for brch in branches_ordered_bfs:
            tree_down, predecs = csgraph.breadth_first_order(
                G_tree, brch[1], directed=True, return_predecessors=True)
            if len(tree_down) == 1:  # If at leaf
                pass
            if brch in z_brch_dict:
                z_br = z_brch_dict[brch]
            else:
                z_br = z_brch_dict[brch[::-1]]
            rowi_BIBC += [branches_ind_dict[brch]] * len(tree_down)
            coli_BIBC += list(tree_down)
            data_BCBV += [z_br] * len(tree_down)
            data_BIBC += [1] * len(tree_down)

        # branches from loops
        for loop_i, brch_loop in enumerate(branches_loops):
            path_lens, path_preds = csgraph.shortest_path(
                G_tree,
                directed=False,
                indices=brch_loop,
                return_predecessors=True)
            init, end = brch_loop
            loop = [end]
            while init != end:
                end = path_preds[0, end]
                loop.append(end)

            loop_size = len(loop)
            coli_BIBC += [nobus + loop_i] * loop_size
            for i in range(len(loop)):
                brch = (loop[i - 1], loop[i])
                if np.argwhere(buses_ordered_bfs == brch[0]) < np.argwhere(
                        buses_ordered_bfs == brch[1]):
                    brch_direct = 1
                else:
                    brch_direct = -1
                data_BIBC.append(brch_direct)

                if brch in branches_ind_dict:
                    rowi_BIBC.append(branches_ind_dict[brch])
                else:
                    rowi_BIBC.append(branches_ind_dict[brch[::-1]])

                if brch in z_brch_dict:
                    data_BCBV.append(z_brch_dict[brch] * brch_direct)
                else:
                    data_BCBV.append(z_brch_dict[brch[::-1]] * brch_direct)

                brchi += 1

    # construction of the BIBC matrix
    # column indices correspond to buses: the root buses come first after ordering,
    # so bus indices are shifted down by the number of reference buses (norefs)
    BIBC = csr_matrix((data_BIBC, (rowi_BIBC, np.array(coli_BIBC) - norefs)),
                      shape=(nobranch, nobranch))
    BCBV = csr_matrix((data_BCBV, (rowi_BIBC, np.array(coli_BIBC) - norefs)),
                      shape=(nobranch, nobranch)).transpose()

    if BCBV.shape[0] > nobus - 1:  # if nbrch > nobus - 1 -> network has loops
        DLF_loop = BCBV * BIBC
        # DLF = [A  M.T ]
        #       [M  N   ]
        A = DLF_loop[0:nobus - 1, 0:nobus - 1]
        M = DLF_loop[nobus - 1:, 0:nobus - 1]
        N = DLF_loop[nobus - 1:, nobus - 1:].A
        # considering the fact that number of loops is relatively small, N matrix is expected to be small and dense
        # ...in that case dense version is more efficient, i.e. N is transformed to dense and
        # inverted using sp.linalg.inv(N)
        DLF = A - M.T * csr_matrix(sp.linalg.inv(N)) * M  # Kron's Reduction
    else:  # no loops -> radial network
        DLF = BCBV * BIBC

    return DLF, buses_ordered_bfs_nets
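As a toy illustration of what BIBC encodes (a standalone sketch, not pandapower code): in a radial feeder each branch current is the sum of the injection currents of all buses downstream of it, so for a 3-bus feeder 0-1-2 rooted at bus 0:

import numpy as np

# rows: branches (0-1) and (1-2); columns: non-root buses 1 and 2
BIBC = np.array([[1, 1],   # branch 0-1 carries the injections of buses 1 and 2
                 [0, 1]])  # branch 1-2 carries only the injection of bus 2
I_bus = np.array([0.5, 0.2])  # assumed bus injection currents
I_branch = BIBC @ I_bus
print(I_branch)  # -> [0.7 0.2]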
Example #7
def detectSoftDihedrals(mol, equivalent_atoms):
    bonds = mol.bonds
    natoms = mol.coords.shape[0]
    conn = np.zeros((natoms, natoms), dtype=bool)  # np.bool was removed in NumPy 1.24

    # Make a connectivity matrix
    # print(natoms)
    for b in bonds:
        conn[b[0], b[1]] = True
        conn[b[1], b[0]] = True

    # Iterate over each of the dihedrals, checking to see which partition the graph
    possible_soft = []
    for d in mol.dihedrals:
        a0 = d[0]
        a1 = d[1]
        a2 = d[2]
        a3 = d[3]
        c = conn.copy()
        c[a1, a2] = c[a2, a1] = False
        left = sp.breadth_first_tree(c, a1, directed=False).indices.flatten()
        right = sp.breadth_first_tree(c, a2, directed=False).indices.flatten()
        left = np.unique(left)
        right = np.unique(right)

        c = conn.copy()
        c[a0, a1] = c[a1, a0] = False
        c[a1, a2] = c[a2, a1] = False
        c[a3, a2] = c[a2, a3] = False
        left_1 = sp.breadth_first_tree(c, a0, directed=False)
        left_1 = left_1.indices
        left_1 = left_1.flatten()

        right_1 = sp.breadth_first_tree(c, a3, directed=False)
        right_1 = right_1.indices
        right_1 = right_1.flatten()

        left_1 = np.unique(left_1)
        right_1 = np.unique(right_1)
        #    print("MARK")
        #    print(len(left_1))
        #    print(len(right_1))

        if not (a2 in left) and not (a1 in right):
            possible_soft.append(
                SoftDihedral(d, left, right, left_1, right_1, []))

    final_soft = []
    e = mol.element
    for d in possible_soft:
        #        print("Possible soft")
        #        print(d.atoms)
        #        print(d.left)
        #        print(d.right)
        #        print("%s %s %s" % ( e[left[0]], e[left[1]], e[left[2]] ) )
        #        print("%s %s %s" % ( e[right[0]], e[right[1]], e[right[2]] ) )
        a1 = d.atoms[1]
        a2 = d.atoms[2]
        #        print( "a1 %d %s" % (a1, e[a1]))
        #        print( "a2 %d %s" % (a2, e[a2]))
        left = d.left
        right = d.right

        # Exclude trivial dihedrals with just one H atom on a side
        #   if (len(left) == 1)   and (e[left[0]] =='H') : continue
        #   if (len(right) == 1)  and (e[right[0]]=='H') : continue

        # Exclude methyls
        if len(left) == 3:
            if e[a1] == 'C' and e[left[0]] == 'H' and e[left[1]] == 'H' and e[
                    left[2]] == 'H':
                continue
        if len(right) == 3:
            if e[a2] == 'C' and e[right[0]] == 'H' and e[
                    right[1]] == 'H' and e[right[2]] == 'H':
                continue
        found = False
        # check to see if the torsional pair of atoms are already included in the list.
        for g in final_soft:
            f = g.atoms
            if f[1] == a1 and f[2] == a2:
                found = True
                break
            if f[2] == a1 and f[1] == a2:
                found = True
                break
        if not found:
            final_soft.append(d)
        #  if equivalent_atoms:
    final_soft = remove_equivalents(mol, final_soft, equivalent_atoms)

    idx = 0
    for t in final_soft:
        print("Dihedral %d: %d-%d-%d-%d" %
              (idx, t.atoms[0], t.atoms[1], t.atoms[2], t.atoms[3]))
        if len(t.equivalents):
            print(" Has equivalent dihedrals through symmetry: ")
            for s in t.equivalents:
                print(" Dihedral %d-%d-%d-%d" % (s[0], s[1], s[2], s[3]))
        idx += 1
    return final_soft
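The core trick above, deleting a bond and then running breadth_first_tree from each endpoint to obtain the two sides of the cut, can be exercised in isolation; a minimal sketch on a 4-atom chain 0-1-2-3 (illustrative, not from this codebase):

import numpy as np
from scipy.sparse.csgraph import breadth_first_tree

conn = np.zeros((4, 4), dtype=bool)
for a, b in [(0, 1), (1, 2), (2, 3)]:
    conn[a, b] = conn[b, a] = True
c = conn.copy()
c[1, 2] = c[2, 1] = False  # cut the central bond
left = breadth_first_tree(c, 1, directed=False).indices   # reachable from atom 1
right = breadth_first_tree(c, 2, directed=False).indices  # reachable from atom 2
print(left, right)  # -> [0] [3]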
Example #8
dist_matrix, predecessors = sp.sparse.csgraph.johnson(
    csgraph=grafo, directed=False, indices=0, return_predecessors=True)


#%%

#https://docs.scipy.org/doc/scipy/scipy-ref-1.2.1.pdf
from scipy.sparse import csr_matrix

from scipy.sparse.csgraph import breadth_first_tree  # breadth-first search tree

X = csr_matrix([[0, 8, 0, 3], [0, 0, 2, 5], [0, 0, 0, 6], [0, 0, 0, 0]])

Tcsr = breadth_first_tree(X, 0, directed=False)

print(Tcsr.toarray().astype(int))  # adjacency matrix of the BFS tree

print(Tcsr)  # the breadth-first-search graph

#%%

#https://www.tutorialspoint.com/scipy/
G_dense = np.array([[0, 2, 1],
                    [2, 0, 0],
                    [1, 0, 0]])

G_masked = np.ma.masked_values(G_dense, 0)
from scipy.sparse import csr_matrix
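The masked-array fragment is cut off here; a plausible continuation in the spirit of the cited tutorial (a sketch, not the original script) converts the masked graph and runs a BFS on it:

from scipy.sparse.csgraph import csgraph_from_masked, breadth_first_tree

G_sparse = csgraph_from_masked(G_masked)  # masked zeros become non-edges
Tree = breadth_first_tree(G_sparse, 0, directed=False)
print(Tree.toarray().astype(int))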
Example #9
def align_normal_space_eigvecs(dataset_dict,
                               N_local_neighborhood_mst=2,
                               N_epoch=301,
                               is_using_separate_opt_per_data_point=False,
                               L_window_past_mean_losses=10,
                               rand_seed=38):
    print('Beginning Normal Space (Eigenvector) Bases Alignment...')
    N_data = dataset_dict['data'].shape[0]
    kd_tree = cKDTree(data=dataset_dict['data'])
    is_dag_extraction_successful = False
    while not is_dag_extraction_successful:
        print(
            'Constructing a sparse nearest neighbor matrix w/ N_local_neighborhood_mst = %d...'
            % N_local_neighborhood_mst)
        start_time = time.perf_counter()
        sparse_matrix_index_I = list()
        sparse_matrix_index_J = list()
        sparse_matrix_value_V = list()
        for i in range(N_data):
            # compute nearest neighbors of size N_local_neighborhood_mst (exclude self):
            [dists, indices] = kd_tree.query(dataset_dict['data'][i],
                                             k=N_local_neighborhood_mst + 1)
            indices = indices[1:]
            dists = dists[1:]
            for j, dist in zip(indices, dists):
                sparse_matrix_index_I.append(i)
                sparse_matrix_index_J.append(j)
                sparse_matrix_value_V.append(dist)
        # sparse nearest neighbor matrix:
        sparse_nearneigh_graph_matrix = coo_matrix(
            (sparse_matrix_value_V,
             (sparse_matrix_index_I, sparse_matrix_index_J)),
            shape=(N_data, N_data))
        # print(sparse_nearneigh_graph_matrix.toarray())
        print('The sparse nearest neighbor matrix constructed in %f seconds.' %
              (time.perf_counter() - start_time))

        # Minimum Spanning Tree of the On-Manifold data points:
        print(
            'Constructing a Minimum Spanning Tree of the On-Manifold data points...'
        )
        start_time = time.perf_counter()
        mst = minimum_spanning_tree(sparse_nearneigh_graph_matrix)
        # print(mst.toarray())
        print(
            'The Minimum Spanning Tree of the On-Manifold data points constructed in %f seconds.'
            % (time.perf_counter() - start_time))

        # Directed Acyclic Graph (DAG) of the On-Manifold data points, with data index 0 as the root:
        print(
            'Constructing a Directed Acyclic Graph (DAG) of the On-Manifold data points...'
        )
        start_time = time.perf_counter()
        root_idx = 0
        dag = breadth_first_tree(mst, root_idx, directed=False)
        # print(dag.toarray())
        print(
            'The Directed Acyclic Graph (DAG) of the On-Manifold data points constructed in %f seconds.'
            % (time.perf_counter() - start_time))
        # some checks to make sure that all On-Manifold data points can be traversed (i.e. connected):
        [N_components, _] = connected_components(dag,
                                                 directed=True,
                                                 connection='weak',
                                                 return_labels=True)
        if N_components == 1:
            is_dag_extraction_successful = True
        else:
            N_local_neighborhood_mst += 1

    # Extract the Parent-Child Nodes relationships:
    print('Extracting the Parent-Child Nodes relationships...')
    start_time = time.perf_counter()
    (dag_rows, dag_cols) = dag.nonzero()
    children_nodes_dict = dict()
    parent_nodes_list = [i for i in range(N_data)]
    for parent, child in zip(dag_rows, dag_cols):
        if parent not in children_nodes_dict:
            children_nodes_dict[parent] = [child]
        else:
            children_nodes_dict[parent].append(child)
        parent_nodes_list[child] = parent
    print('The Parent-Child Nodes relationships extracted in %f seconds.' %
          (time.perf_counter() - start_time))

    possible_coord_frames = np.stack(
        [dataset_dict['cov_nullspace'], dataset_dict['cov_nullspace']])
    # Flip the first normal space eigenvector of the 2nd copy of the eigenvector set.
    # This will make the 1st copy of the eigenvector set form a matrix with pseudo-determinant +1,
    # while the 2nd copy of the eigenvector set form a matrix with pseudo-determinant -1 (or the other way around).
    # This means that either one of the 1st copy or the 2nd copy (NOT BOTH)
    # will form a pseudo-SO(n) coordinate frame
    # (n is the number of eigenvectors in the normal space).
    possible_coord_frames[1, :, :, 0] *= -1
    dim_ambient = dataset_dict['cov_nullspace'].shape[1]
    n = dataset_dict['cov_nullspace'].shape[2]

    # perform the (Iterative) Orthogonal Subspace Alignment (OSA) between Parent-Child Nodes:
    print(
        'Performing the (Iterative) Orthogonal Subspace Alignment (OSA) between Parent-Child Nodes...'
    )
    start_time = time.perf_counter()
    child2parent_osa_losses = np.zeros(shape=[N_data, 2, 2])
    child2parent_osa_SOn_transforms = np.zeros(shape=[N_data, 2, 2, n, n])
    child2parent_osa_result = np.zeros(shape=[N_data, 2, 2, dim_ambient, n])
    # si / di: source / destination index
    # (each selects either the 1st or 2nd copy of the eigenvector set)
    for si in range(2):
        for di in range(2):
            source_coord_frames = possible_coord_frames[si, :, :, :]
            dest_coord_frames = possible_coord_frames[di,
                                                      parent_nodes_list, :, :]
            SOn = SpecialOrthogonalGroups(n=n,
                                          N_batch=N_data,
                                          rand_seed=rand_seed)
            window_past_mean_losses_list = list()
            for epoch in tqdm.tqdm(range(N_epoch)):
                # search for SO(n) transform that if applied (post-multiplied) to
                # source_coord_frames will result in coordinate frames
                # which is as close (aligned) as possible to the dest_coord_frames:
                [
                    current_losses, current_mean_loss, SOn_transforms,
                    alignment_result
                ] = SOn.train(inputs=source_coord_frames,
                              target_outputs=dest_coord_frames,
                              learning_rate=0.01,
                              is_using_separate_opt_per_data_point=
                              is_using_separate_opt_per_data_point)
                if epoch % 10 == 0:
                    print('Source Idx %d, Dest Idx %d, Epoch %2d: ' %
                          (si, di, epoch))
                    print('           mean_loss = ', current_mean_loss)
                    print('           loss[:10] = ', current_losses[:10])
                if len(window_past_mean_losses_list
                       ) >= L_window_past_mean_losses:
                    np_window_past_mean_losses = np.array(
                        window_past_mean_losses_list)
                    mean_past_mean_losses = np.mean(np_window_past_mean_losses)
                    std_past_mean_losses = np.std(np_window_past_mean_losses)
                    if epoch % 10 == 0:
                        print('           mean_past_mean_losses = ',
                              mean_past_mean_losses)
                        print('           std_past_mean_losses = ',
                              std_past_mean_losses)
                    if std_past_mean_losses < 10.e-6:
                        print(
                            'Terminated at: Source Idx %d, Dest Idx %d, Epoch %2d: '
                            % (si, di, epoch))
                        break
                    window_past_mean_losses_list.pop(0)
                window_past_mean_losses_list.append(current_mean_loss)
            child2parent_osa_losses[:, si, di] = np.array(current_losses)
            child2parent_osa_SOn_transforms[:, si, di, :, :] = SOn_transforms
            child2parent_osa_result[:, si, di, :, :] = alignment_result
    print(
        'The (Iterative) Orthogonal Subspace Alignment (OSA) between Parent-Child Nodes is completed in %f seconds.'
        % (time.perf_counter() - start_time))

    # now aggregate/compound the SO(n) transformations from any nodes in the DAG to the root node:
    print(
        'Aggregating/compounding the SO(n) transformations from any nodes in the DAG to the root node...'
    )
    start_time = time.perf_counter()
    first_or_2nd_eigvec_set = np.array(
        [0 if i == root_idx else -1 for i in range(N_data)])
    compound_osa_SOn_to_root = np.empty(shape=[N_data, n, n])
    compound_osa_SOn_to_root[:, :, :] = np.nan
    compound_osa_SOn_to_root[root_idx, :, :] = np.eye(n)
    selected_coord_frames = np.zeros_like(dataset_dict['cov_nullspace'])
    selected_coord_frames[root_idx, :, :] = dataset_dict['cov_nullspace'][
        root_idx, :, :]
    # do breadth-first-search/traversal:
    bfs_queue = list()
    bfs_queue += children_nodes_dict[root_idx]
    while len(bfs_queue) > 0:
        node_idx = bfs_queue.pop(0)
        if node_idx in children_nodes_dict:
            bfs_queue += children_nodes_dict[node_idx]
        parent_idx = parent_nodes_list[node_idx]
        first_or_2nd_eigvec_set[node_idx] = np.argmin(
            child2parent_osa_losses[node_idx, :,
                                    first_or_2nd_eigvec_set[parent_idx]])
        selected_coord_frames[node_idx, :, :] = possible_coord_frames[
            first_or_2nd_eigvec_set[node_idx], node_idx, :, :]
        compound_osa_SOn_to_root[node_idx, :, :] = (
            child2parent_osa_SOn_transforms[
                node_idx, first_or_2nd_eigvec_set[node_idx],
                first_or_2nd_eigvec_set[parent_idx], :, :]
            @ compound_osa_SOn_to_root[parent_idx, :, :])
    assert (np.all(first_or_2nd_eigvec_set >= 0)
            and np.all(first_or_2nd_eigvec_set <= 1))
    assert (not np.any(np.isnan(compound_osa_SOn_to_root)))
    osa_result = (selected_coord_frames @ compound_osa_SOn_to_root)

    # some comparison of the effectiveness of the IOSA:
    SOn = SpecialOrthogonalGroups(n=n, N_batch=N_data, rand_seed=rand_seed)
    child2parent_orig_losses = SOn.loss(target_y=np.float32(
        np.stack([
            dataset_dict['cov_nullspace'][parent_nodes_list[i], :, :]
            for i in range(N_data)
        ])),
                                        predicted_y=np.float32(
                                            dataset_dict['cov_nullspace']))
    child2parent_orig_losses = child2parent_orig_losses.numpy()
    print("Before Alignment: Orthogonal Subspace Alignment Losses[:10] = ",
          child2parent_orig_losses[:10])
    print("                                                       Mean = ",
          np.mean(child2parent_orig_losses))
    print("                                                       Std. = ",
          np.std(child2parent_orig_losses))
    child2parent_osa_opt_losses = SOn.loss(target_y=np.float32(
        np.stack(
            [osa_result[parent_nodes_list[i], :, :] for i in range(N_data)])),
                                           predicted_y=np.float32(osa_result))
    child2parent_osa_opt_losses = child2parent_osa_opt_losses.numpy()
    print("After Alignment: Orthogonal Subspace Alignment Losses[:10] = ",
          child2parent_osa_opt_losses[:10])
    print("                                                      Mean = ",
          np.mean(child2parent_osa_opt_losses))
    print("                                                      Std. = ",
          np.std(child2parent_osa_opt_losses))

    print(
        'The SO(n) transformations from any nodes in the DAG to the root node is aggregated/compounded in %f seconds.'
        % (time.perf_counter() - start_time))

    return osa_result
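The DAG bookkeeping above (dag.nonzero() yielding the parent-to-child edges of the BFS tree, then a queue-based traversal) is reusable on its own; a minimal sketch, assuming a small 3-point dataset:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree, breadth_first_tree

W = csr_matrix(np.array([[0., 1., 4.],
                         [1., 0., 2.],
                         [4., 2., 0.]]))
mst = minimum_spanning_tree(W)                    # keeps edges (0,1) and (1,2)
dag = breadth_first_tree(mst, 0, directed=False)  # rooted at data index 0
rows, cols = dag.nonzero()       # each (row, col) pair is a parent -> child edge
for parent, child in zip(rows, cols):
    print(parent, child)         # prints: 0 1, then 1 2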
Example #10
def get_bond_matrix(sbu):
    """Guesses the bond order in neighbourlist based on covalent radii

    the radii for BO > 1 are extrapolated by removing 0.15 Angstroms per order
    see Beatriz Cordero, Veronica Gomez, Ana E. Platero-Prats, Marc Reves,
    Jorge Echeverria, Eduard Cremades, Flavia Barragan and Santiago Alvarez
    (2008). "Covalent radii revisited". Dalton Trans. (21): 2832-2838
    http://dx.doi.org/10.1039/b801115j

    Parameters
    ----------
    sbu: ase.Atoms
        the molecule from which to guess
        the bonding information

    Returns
    -------
    bonds: numpy.ndarray
        the bond orders matrix
    """
    # first guess
    bonds = numpy.zeros((len(sbu), len(sbu)))
    symbols = numpy.array(sbu.get_chemical_symbols())
    numbers = numpy.array(sbu.get_atomic_numbers())
    positions = numpy.array(sbu.get_positions())
    BO1 = numpy.array([covalent_radii[n] if n > 0 else 0.7 for n in numbers])
    BO2 = BO1 - 0.15
    BO3 = BO2 - 0.15
    nl1 = NeighborList(
        cutoffs=BO1,
        bothways=True,
        self_interaction=False,
        skin=0.1)
    nl2 = NeighborList(
        cutoffs=BO2,
        bothways=True,
        self_interaction=False,
        skin=0.1)
    nl3 = NeighborList(
        cutoffs=BO3,
        bothways=True,
        self_interaction=False,
        skin=0.1)
    nl1.update(sbu)
    nl2.update(sbu)
    nl3.update(sbu)
    for atom in sbu:
        i1, _ = nl1.get_neighbors(atom.index)
        i2, _ = nl2.get_neighbors(atom.index)
        i3, _ = nl3.get_neighbors(atom.index)
        bonds[atom.index, i1] = 1.0
        bonds[atom.index, i2] = 2.0
        bonds[atom.index, i3] = 3.0
    # cleanup with particular cases
    # identify particular atoms
    hydrogens = numpy.where(symbols == "H")[0]
    metals = numpy.where(is_metal(symbols))[0]
    alkali = numpy.where(is_alkali(symbols))[0]
    # the rest is dubbed "organic"
    organic = numpy.ones(len(sbu), dtype=bool)
    organic[hydrogens] = False
    organic[metals] = False
    organic[alkali] = False
    organic = numpy.where(organic)[0]
    # Hydrogen has BO of 1
    bonds_h = bonds[hydrogens]
    bonds_h[bonds_h > 1.0] = 1.0
    bonds[hydrogens, :] = bonds_h
    bonds[:, hydrogens] = bonds_h.T
    # Metal-Metal bonds: if no special case, nominal bond
    ix = numpy.ix_(metals, metals)
    bix = bonds[ix]
    bix[numpy.nonzero(bix)] = 0.25
    bonds[ix] = bix
    # no H-Metal bonds
    ix = numpy.ix_(metals, hydrogens)
    bonds[ix] = 0.0
    ix = numpy.ix_(hydrogens, metals)
    bonds[ix] = 0.0
    # no alkali-alkali bonds
    ix = numpy.ix_(alkali, alkali)
    bonds[ix] = 0.0
    # no alkali-metal bonds
    ix = numpy.ix_(metals, alkali)
    bonds[ix] = 0.0
    ix = numpy.ix_(alkali, metals)
    bonds[ix] = 0.0
    # metal-organic is coordination bond
    ix = numpy.ix_(metals, organic)
    bix = bonds[ix]
    bix[numpy.nonzero(bix)] = 0.5
    bonds[ix] = bix
    ix = numpy.ix_(organic, metals)
    bix = bonds[ix]
    bix[numpy.nonzero(bix)] = 0.5
    bonds[ix] = bix
    # aromaticity and rings
    rings = []
    # first, use the compressed sparse graph object
    # we only care about organic bonds and not hydrogens
    graph_bonds = numpy.array(bonds > 0.99, dtype=float)
    graph_bonds[hydrogens, :] = 0.0
    graph_bonds[:, hydrogens] = 0.0
    graph = csgraph.csgraph_from_dense(graph_bonds)
    for sg in graph.indices:
        subgraph = graph[sg]
        for i, j in combinations(subgraph.indices, 2):
            t0 = csgraph.breadth_first_tree(graph, i_start=i, directed=False)
            t1 = csgraph.breadth_first_tree(graph, i_start=j, directed=False)
            t0i = t0.indices
            t1i = t1.indices
            ring = sorted(set(list(t0i) + list(t1i) + [i, j, sg]))
            # some conditions
            seen = (ring in rings)
            isring = (sorted(t0i[1:]) == sorted(t1i[1:]))
            bigenough = (len(ring) >= 5)
            smallenough = (len(ring) <= 10)
            if isring and not seen and bigenough and smallenough:
                rings.append(ring)
    # we now have a list of all the shortest rings within
    # the molecular graph. If planar, the ring might be aromatic
    aromatic_epsilon = 0.1
    aromatic = []
    for ring in rings:
        homocycle = (symbols[ring] == "C").all()
        heterocycle = numpy.in1d(
            symbols[ring], numpy.array(["C", "S", "N", "O"])).all()
        if (homocycle and (len(ring) % 2) == 0) or heterocycle:
            ring_positions = positions[ring]
            # small function for coplanarity
            dets = [numpy.linalg.det(numpy.array(x[:3]) - x[3])
                    for x in combinations(ring_positions, 4)]
            coplanar = all(abs(d) < aromatic_epsilon for d in dets)  # all dets ~ 0 -> coplanar
            if coplanar:
                aromatic.append(ring)
    # aromatic bond fixing
    aromatic = numpy.array(aromatic).ravel()
    ix = numpy.ix_(aromatic, aromatic)
    bix = bonds[ix]
    bix[numpy.nonzero(bix)] = 1.5
    bonds[ix] = bix
    # hydrogen bonds
    # TODO
    return bonds
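The coplanarity test above generalizes: four points are coplanar when the determinant of the three edge vectors taken from the fourth point vanishes. A quick standalone check (not library code) on a planar square:

import numpy
from itertools import combinations

ring_positions = numpy.array([[0., 0., 0.], [1., 0., 0.],
                              [1., 1., 0.], [0., 1., 0.]])
dets = [numpy.linalg.det(numpy.array(x[:3]) - x[3])
        for x in combinations(ring_positions, 4)]
print(all(abs(d) < 0.1 for d in dets))  # -> True: the square is coplanar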
Example #11
    return csgraph.csgraph_from_dense(g_dense).astype(np.int64)


def count_bags(graph, irow):
    """Recursively count the bags contained inside bag `irow`;
    edge weights hold how many of each child bag are required."""
    row = graph.getrow(irow)
    inds = row.indices
    data = row.data
    if len(inds) > 0:
        res = np.sum(data)
        for k in range(len(inds)):
            res += data[k] * count_bags(graph, inds[k])
        return res
    else:
        return 0


if __name__ == "__main__":

    bag_rules = parse_input()
    bags = list(bag_rules.keys())

    g = create_graph(bag_rules)

    # Part I: BFS on the transposed graph reaches every bag that can
    # eventually contain a shiny gold bag; the tree has one edge per such bag
    btree = csgraph.breadth_first_tree(g.T, bags.index('shiny gold'))
    print(btree.nnz)

    # Part II
    print(count_bags(g, bags.index('shiny gold')))
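A toy check of the Part I logic (a hypothetical 3-bag rule set, not the puzzle input):

import numpy as np
from scipy.sparse import csgraph

bags = ['bright white', 'muted yellow', 'shiny gold']
g_dense = np.array([[0, 0, 1],   # bright white holds 1 shiny gold
                    [0, 0, 2],   # muted yellow holds 2 shiny gold
                    [0, 0, 0]])  # shiny gold holds nothing
g = csgraph.csgraph_from_dense(g_dense).astype(np.int64)
btree = csgraph.breadth_first_tree(g.T, bags.index('shiny gold'))
print(btree.nnz)  # -> 2: both other colours can eventually hold shiny gold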