Example No. 1
# these helpers are assumed to come from graspy (now graspologic)
from graspy.utils import pass_to_ranks, augment_diagonal, to_laplace


def preprocess_adjs(adjs, method="ase"):
    adjs = [pass_to_ranks(a) for a in adjs]  # rank-transform the edge weights
    adjs = [a + 1 / a.size for a in adjs]  # small offset makes the graph fully connected
    if method == "ase":
        adjs = [augment_diagonal(a) for a in adjs]  # degree-based diagonal augmentation for ASE
    elif method == "lse":
        adjs = [to_laplace(a) for a in adjs]  # normalized Laplacian for LSE
    return adjs
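
A minimal usage sketch (not from the original source), assuming graspy is installed; the simulated graphs are stand-ins for real connectome data:

from graspy.simulations import er_np

# hypothetical stand-ins for real adjacency matrices
raw_adjs = [er_np(50, 0.2) for _ in range(3)]

ase_adjs = preprocess_adjs(raw_adjs, method="ase")  # ranks + offset + augmented diagonal
lse_adjs = preprocess_adjs(raw_adjs, method="lse")  # ranks + offset + Laplacian
print(ase_adjs[0].shape)  # (50, 50)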
Example No. 2
import numpy as np
from graspy.embed import OmnibusEmbed
from graspy.utils import augment_diagonal


def reg_omni(adjs):
    # lp_inds is assumed to be a global array of left-pair indices
    adjs = [a + 1 / (len(lp_inds) ** 2) for a in adjs]  # small constant offset
    adjs = [augment_diagonal(a) for a in adjs]
    omni = OmnibusEmbed(n_components=4, check_lcc=False, n_iter=10)
    embed = omni.fit_transform(adjs)  # directed graphs -> (out, in) embeddings
    embed = np.concatenate(embed, axis=-1)  # (n_graphs, n_verts, 2 * n_components)
    embed = embed[2:]  # TODO (this keeps only graphs from index 2 on)
    embed = np.concatenate(embed, axis=0)  # stack the remaining graphs' vertices
    return embed
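
A hedged sketch of what OmnibusEmbed returns for directed graphs, which is what the concatenation steps above rely on; the graphs below are hypothetical:

import numpy as np
from graspy.embed import OmnibusEmbed

rng = np.random.default_rng(0)
adjs = [(rng.random((50, 50)) < 0.2).astype(float) for _ in range(4)]

omni = OmnibusEmbed(n_components=4, check_lcc=False)
out_embed, in_embed = omni.fit_transform(adjs)  # directed -> (out, in) pair
print(out_embed.shape)  # (4, 50, 4): n_graphs x n_verts x n_components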
Example No. 3
    def _embed(self, adj=None):
        if adj is None:
            adj = self.adj
        # TODO look into PTR at this level as well
        # lp_inds, rp_inds = get_paired_inds(self.meta)
        lp_inds = self.left_pair_inds
        rp_inds = self.right_pair_inds

        embed_adj = pass_to_ranks(adj)
        if self.embed == "ase":
            embedder = AdjacencySpectralEmbed(
                n_components=self.n_components, n_elbows=self.n_elbows
            )
            embed = embedder.fit_transform(embed_adj)
        elif self.embed == "lse":
            embedder = LaplacianSpectralEmbed(
                n_components=self.n_components,
                n_elbows=self.n_elbows,
                regularizer=self.regularizer,
            )
            embed = embedder.fit_transform(embed_adj)
        elif self.embed == "unscaled_ase":
            embed_adj = pass_to_ranks(adj)
            embed_adj = augment_diagonal(embed_adj)
            embed = selectSVD(
                embed_adj, n_components=self.n_components, n_elbows=self.n_elbows
            )
            embed = (embed[0], embed[2].T)  # (U, V.T): unscaled out/in latent positions

        X = np.concatenate(embed, axis=1)

        fraction_paired = (len(lp_inds) + len(rp_inds)) / len(self.root_inds)
        print(f"Learning transformation with {fraction_paired:.2f} of neurons paired")
        # align the left-hemisphere embedding onto the right using the known pairs
        R, _ = orthogonal_procrustes(X[lp_inds], X[rp_inds])
        X[self.left_inds] = X[self.left_inds] @ R

        if self.normalize:
            # scale each embedding row by its sum
            row_sums = np.sum(X, axis=1)
            X /= row_sums[:, None]

        return X
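
The key step above is the orthogonal Procrustes alignment: the known left/right neuron pairs are used to learn a rotation mapping the left hemisphere's embedding into the right's coordinate frame. A toy illustration with scipy (all names here are illustrative):

import numpy as np
from scipy.linalg import orthogonal_procrustes

rng = np.random.default_rng(0)
X_right = rng.normal(size=(20, 3))
Q, _ = np.linalg.qr(rng.normal(size=(3, 3)))  # a random orthogonal matrix
X_left = X_right @ Q.T  # the left embedding is a rotated copy of the right

# R minimizes ||X_left @ R - X_right||_F over orthogonal matrices
R, _ = orthogonal_procrustes(X_left, X_right)
print(np.allclose(X_left @ R, X_right))  # True: the rotation is recovered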
Example No. 4
    def _embed(self, adj=None):
        if adj is None:
            adj = self.adj

        lp_inds = self.left_pair_inds
        rp_inds = self.right_pair_inds

        embed_adj = pass_to_ranks(adj)  # TODO PTR here?
        if self.plus_c:
            embed_adj += 1 / adj.size
        if self.embed == "ase":
            embedder = AdjacencySpectralEmbed(n_components=self.n_components,
                                              n_elbows=self.n_elbows)
            embed = embedder.fit_transform(embed_adj)
        elif self.embed == "lse":
            embedder = LaplacianSpectralEmbed(
                n_components=self.n_components,
                n_elbows=self.n_elbows,
                regularizer=self.regularizer,
            )
            embed = embedder.fit_transform(embed_adj)
        elif self.embed == "unscaled_ase":
            embed_adj = augment_diagonal(embed_adj)
            embed = selectSVD(embed_adj,
                              n_components=self.n_components,
                              n_elbows=self.n_elbows)
            embed = (embed[0], embed[2].T)

        X = np.concatenate(embed, axis=1)

        fraction_paired = (len(lp_inds) + len(rp_inds)) / len(self.root_inds)
        print(f"Learning transformation with {fraction_paired:.2f} of neurons paired")

        X = self._procrustes(X)

        if self.normalize:
            # project each embedding row onto the unit sphere
            row_norms = np.linalg.norm(X, axis=1)
            X /= row_norms[:, None]

        return X
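
This variant differs from Example No. 3 in the optional plus_c offset and in the normalization, which divides each row by its Euclidean norm (projecting onto the unit sphere) rather than its sum. A minimal sketch of that step:

import numpy as np

X = np.random.default_rng(1).normal(size=(5, 3))
X /= np.linalg.norm(X, axis=1)[:, None]  # each row now has unit norm
print(np.linalg.norm(X, axis=1))  # [1. 1. 1. 1. 1.]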
Example No. 5
def preprocess_adjs(adjs, method="ase"):
    """Preprocessing necessary prior to embedding a graph, opetates on a list

    Parameters
    ----------
    adjs : list of adjacency matrices
        [description]
    method : str, optional
        [description], by default "ase"

    Returns
    -------
    [type]
        [description]
    """
    adjs = [pass_to_ranks(a) for a in adjs]
    adjs = [a + 1 / a.size for a in adjs]
    if method == "ase":
        adjs = [augment_diagonal(a) for a in adjs]
    elif method == "lse":  # haven't really used much. a few params to look at here
        adjs = [to_laplace(a) for a in adjs]
    return adjs
Example No. 6
def preprocess_adjs(adjs):
    adjs = [pass_to_ranks(a) for a in adjs]
    adjs = [a + 1 / a.size for a in adjs]
    adjs = [augment_diagonal(a) for a in adjs]
    return adjs
Example No. 7
import numpy as np
from scipy.linalg import orthogonal_procrustes
from graspy.embed import OmnibusEmbed, selectSVD
from graspy.utils import pass_to_ranks, augment_diagonal

# meta, mg, load_metagraph, and get_paired_inds are assumed defined earlier
lp_inds, rp_inds = get_paired_inds(meta)
left_inds = meta[meta["left"]]["inds"]
right_inds = meta[meta["right"]]["inds"]

# %% Load and preprocess all graphs
graph_types = ["Gad", "Gaa", "Gdd", "Gda"]
adjs = []
for g in graph_types:
    temp_mg = load_metagraph(g, version="2020-04-01")
    temp_mg.reindex(mg.meta.index, use_ids=True)  # align nodes to the reference metagraph mg
    temp_adj = temp_mg.adj
    adjs.append(temp_adj)

embed_adjs = [pass_to_ranks(a) for a in adjs]
embed_adjs = [a + 1 / a.size for a in embed_adjs]
embed_adjs = [augment_diagonal(a) for a in embed_adjs]

# %% [markdown]
# ##


def omni_procrust_svd(embed_adjs):
    omni = OmnibusEmbed(n_components=None, check_lcc=False)
    joint_embed = omni.fit_transform(embed_adjs)
    cat_embed = np.concatenate(joint_embed, axis=-1)
    # print(f"Omni concatenated embedding shape: {cat_embed.shape}")
    for e in cat_embed:
        # align each graph's left-hemisphere embedding onto the right
        e[left_inds] = e[left_inds] @ orthogonal_procrustes(
            e[lp_inds], e[rp_inds])[0]
    cat_embed = np.concatenate(cat_embed, axis=-1)
    U, S, Vt = selectSVD(cat_embed, n_elbows=3)
    return U  # NOTE: assumed return value; the original snippet ends at the SVD
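
A hedged end-to-end sketch of omni_procrust_svd on synthetic data, since load_metagraph and the real meta table are not reproducible here; all indices are toy stand-ins (and they shadow the globals the function reads):

import numpy as np

# toy stand-ins: 20 nodes, first 10 "left", last 10 "right", paired by position
lp_inds = np.arange(10)
rp_inds = np.arange(10, 20)
left_inds = np.arange(10)

rng = np.random.default_rng(0)
toy_adjs = [(rng.random((20, 20)) < 0.3).astype(float) for _ in range(4)]
toy_adjs = [augment_diagonal(pass_to_ranks(a) + 1 / a.size) for a in toy_adjs]

U = omni_procrust_svd(toy_adjs)  # relies on the assumed return added above
print(U.shape)  # (20, d): joint embedding after omni + procrustes + SVD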