Example 1
    def infomax_loss(self, prop_emb_list, neg_prop_emb_list):
        # Requires module-level imports: numpy as np, random.
        # Infomax objective: score every node embedding against a global
        # summary vector, rewarding the real (positive) propagation results
        # and penalizing the shuffled (negative) ones.
        prop_result = np.concatenate(prop_emb_list, axis=1)
        if self.svd:
            prop_result = get_embedding_dense(prop_result, self.dim)

        def sigmoid(x):
            return 1.0 / (1 + np.exp(-x))

        # Global summary: the mean embedding across all nodes.
        pos_glb = prop_result.mean(0)
        pos_info = sigmoid(pos_glb.dot(prop_result.T))
        pos_loss = np.mean(np.log(pos_info))

        neg_loss = 0
        neg_step = 1
        for _ in range(neg_step):
            neg_prop_result = np.concatenate(neg_prop_emb_list, axis=1)
            if self.svd:
                neg_prop_result = get_embedding_dense(neg_prop_result,
                                                      self.dim)

            neg_info = sigmoid(pos_glb.dot(neg_prop_result.T))
            neg_loss += np.mean(np.log(1 - neg_info))
            # Reshuffle the negatives for the next step (a no-op while
            # neg_step == 1).
            random.shuffle(neg_prop_emb_list)

        # Negate so that minimizing the loss maximizes the log-likelihoods.
        return -(pos_loss + neg_loss) / (1 + neg_step)
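The objective above can be exercised without the host class. A minimal self-contained sketch, where the random matrices stand in for the concatenated positive and negative propagation results and svd=False is assumed so get_embedding_dense is not needed:

    import numpy as np

    rng = np.random.default_rng(0)
    prop = rng.random((16, 8))   # stands in for np.concatenate(prop_emb_list, axis=1)
    neg = rng.random((16, 8))    # stands in for the negative propagation result

    def sigmoid(x):
        return 1.0 / (1 + np.exp(-x))

    pos_glb = prop.mean(0)                                    # global summary vector
    pos_loss = np.mean(np.log(sigmoid(pos_glb @ prop.T)))
    neg_loss = np.mean(np.log(1 - sigmoid(pos_glb @ neg.T)))
    print(-(pos_loss + neg_loss) / 2)                         # matches neg_step = 1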
Example 2
    def __call__(self, emb, adj):
        # Shortcut: a lone "identity" filter leaves the embedding unchanged.
        if len(self.filter_types) == 1 and self.filter_types[0] == "identity":
            return emb

        dim = emb.shape[1]
        # Apply each graph filter and stack the results feature-wise.
        prop_result = []
        for tp in self.filter_types:
            prop_result.append(propagate(adj, emb, tp))
        prop_result_emb = np.concatenate(prop_result, axis=1)
        # Optionally compress back to the original dimension via SVD.
        if self.svd:
            prop_result_emb = get_embedding_dense(prop_result_emb, dim)
        return prop_result_emb
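propagate is defined elsewhere in the source; it presumably dispatches on the filter type to the spectral filters these examples tune (e.g. the Chebyshev-Gaussian kernel in Example 4). A minimal sketch of its contract, where the "rw" (one-step random-walk) branch is purely illustrative:

    import numpy as np
    import scipy.sparse as sp

    def propagate(adj, emb, tp):
        # Hypothetical dispatch; the real helper implements heavier filters.
        if tp == "identity":
            return emb
        if tp == "rw":
            # One-step random-walk smoothing: D^-1 A applied to the features.
            deg = np.asarray(adj.sum(axis=1)).ravel()
            deg[deg == 0] = 1.0
            return sp.diags(1.0 / deg) @ adj @ emb
        raise ValueError(f"unknown filter type: {tp}")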
Example 3
    def __call__(self, emb, adj):
        self.init_data(emb, adj)
        # Search the propagation hyper-parameters with Optuna;
        # create_study() builds a minimizing study by default.
        study = optuna.create_study()
        study.optimize(self.target_func, n_jobs=self.n_workers, n_trials=self.max_evals)
        best_params = study.best_params

        # Re-run propagation with the best parameters and stack the
        # per-filter results feature-wise.
        best_result = self.prop(best_params)[0]
        best_result = np.concatenate(best_result, axis=1)
        print(f"best parameters: {best_params}")

        if self.svd:
            best_result = get_embedding_dense(best_result, self.dim)
        return best_result
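target_func and prop belong to the host class; all Optuna requires of target_func is the standard objective contract (take a Trial, return a value to minimize). A runnable toy sketch, where the hyper-parameter names mu and order are illustrative, echoing Example 4, and the loss is a stand-in:

    import optuna

    def target_func(trial):
        # Sample hyper-parameters; return a value to minimize, since
        # optuna.create_study() builds a minimizing study by default.
        mu = trial.suggest_float("mu", 0.0, 1.0)
        order = trial.suggest_int("order", 2, 10)
        return (mu - 0.5) ** 2 + 0.01 * order  # toy loss, lower is better

    study = optuna.create_study()
    study.optimize(target_func, n_trials=20, n_jobs=1)
    print(study.best_params)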
Example 4
    def _chebyshev_gaussian(self,
                            A,
                            a,
                            order=5,
                            mu=0.5,
                            s=0.2,
                            plus=False,
                            nn=False):
        # NE enhancement via spectral propagation (ProNE-style): approximate
        # a Gaussian spectral filter centered at mu, with width controlled
        # by s, through a truncated Chebyshev expansion whose coefficients
        # are the modified Bessel functions iv(k, s).
        # Requires module-level imports: scipy.sparse as sp,
        # scipy.special.iv, sklearn.preprocessing.
        num_node = a.shape[0]

        if order == 1:
            return a

        # Row-normalized adjacency with self-loops, and its random-walk
        # Laplacian, shifted by mu before the expansion.
        A = sp.eye(num_node) + A
        DA = preprocessing.normalize(A, norm="l1")
        L = sp.eye(num_node) - DA
        M = L - mu * sp.eye(num_node)

        # Chebyshev recurrence T_{k+1} = 2 X T_k - T_{k-1}, applied here
        # with the operator X = (M @ M) / 2 - I.
        Lx0 = a
        Lx1 = M.dot(a)
        Lx1 = 0.5 * M.dot(Lx1) - a

        conv = iv(0, s) * Lx0
        conv -= 2 * iv(1, s) * Lx1
        for i in range(2, order):
            Lx2 = M.dot(Lx1)
            Lx2 = (M.dot(Lx2) - 2 * Lx1) - Lx0
            # (plain first-order alternative: Lx2 = 2 * L.dot(Lx1) - Lx0)
            # The Bessel-weighted terms alternate in sign.
            if i % 2 == 0:
                conv += 2 * iv(i, s) * Lx2
            else:
                conv -= 2 * iv(i, s) * Lx2
            Lx0 = Lx1
            Lx1 = Lx2
            del Lx2

        emb = mm = conv
        if not plus:
            # Default variant: propagate the residual once more through A.
            mm = A.dot(a - conv)
        if not nn:
            # Densify via truncated SVD unless raw features are requested.
            emb = get_embedding_dense(mm, self.dimension)
        return emb
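get_embedding_dense is used by all four examples but defined elsewhere. A plausible reconstruction, assuming ProNE-style SVD post-processing (truncate to dimension, rescale by the square root of the singular values, L2-normalize the rows); treat the exact scaling as an assumption:

    import numpy as np
    from scipy import linalg
    from sklearn import preprocessing

    def get_embedding_dense(matrix, dimension):
        # Truncated SVD of the propagated features: keep the top
        # `dimension` left singular vectors, rescale, and normalize rows.
        U, s, _ = linalg.svd(matrix, full_matrices=False, check_finite=False)
        U = U[:, :dimension] * np.sqrt(s[:dimension])
        return preprocessing.normalize(U, norm="l2")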