Example #1
    def process(self):
        data_list = []
        self.num_nearest_bone = 5
        i = 0.0
        for v_filename in self.raw_paths:
            print('preprocessing data complete: {:.4f}%'.format(100 * i / len(self.raw_paths)))
            i += 1.0
            v = np.loadtxt(v_filename)
            v = torch.from_numpy(v).float()
            tpl_e = np.loadtxt(v_filename.replace('_v.txt', '_tpl_e.txt')).T
            geo_e = np.loadtxt(v_filename.replace('_v.txt', '_geo_e.txt')).T
            tpl_e = torch.from_numpy(tpl_e).long()
            geo_e = torch.from_numpy(geo_e).long()
            tpl_e, _ = add_self_loops(tpl_e, num_nodes=v.size(0))
            geo_e, _ = add_self_loops(geo_e, num_nodes=v.size(0))
            skin_input, skin_nn, skin_label, loss_mask = self.load_skin(v_filename.replace('_v.txt', '_skin.txt'))
            
            skin_input = torch.from_numpy(skin_input).float()
            skin_label = torch.from_numpy(skin_label).float()
            skin_nn = torch.from_numpy(skin_nn).long()
            loss_mask = torch.from_numpy(loss_mask).long()
            num_skin = len(skin_input)

            name = int(v_filename.split('/')[-1].split('_')[0])
            data_list.append(Data(x=v[:, 3:6], pos=v[:, 0:3], skin_input=skin_input, skin_label=skin_label,
                                  skin_nn=skin_nn, loss_mask=loss_mask, num_skin=num_skin, name=name,
                                  tpl_edge_index=tpl_e, geo_edge_index=geo_e))
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
Example #2
    def diffusion_matrix_exact(self, edge_index, edge_weight, num_nodes,
                               method, **kwargs):
        r"""Calculate the (dense) diffusion on a given sparse graph.
        Note that these exact variants are not scalable. They densify the
        adjacency matrix and calculate either its inverse or its matrix
        exponential.
        Args:
            edge_index (LongTensor): The edge indices.
            edge_weight (Tensor): One-dimensional edge weights.
            num_nodes (int): Number of nodes.
            method (str): Diffusion method:
                1. :obj:`"ppr"`: Use personalized PageRank as diffusion.
                   Additionally expects the parameter:
                   - **alpha** (*float*) - Return probability in PPR.
                     Commonly lies in :obj:`[0.05, 0.2]`.
                2. :obj:`"heat"`: Use heat kernel diffusion.
                   Additionally expects the parameter:
                   - **t** (*float*) - Time of diffusion. Commonly lies in
                     :obj:`[2, 10]`.
                3. :obj:`"coeff"`: Freely choose diffusion coefficients.
                   Additionally expects the parameter:
                   - **coeffs** (*List[float]*) - List of coefficients
                     :obj:`theta_k` for each power of the transition matrix
                     (starting at :obj:`0`).
        :rtype: (:class:`Tensor`)
        """
        if method == 'ppr':
            # α (I_n + (α - 1) A)^-1
            edge_weight = (kwargs['alpha'] - 1) * edge_weight
            edge_index, edge_weight = add_self_loops(edge_index,
                                                     edge_weight,
                                                     fill_value=1,
                                                     num_nodes=num_nodes)
            mat = to_dense_adj(edge_index, edge_attr=edge_weight).squeeze()
            diff_matrix = kwargs['alpha'] * torch.inverse(mat)

        elif method == 'heat':
            # exp(t (A - I_n))
            edge_index, edge_weight = add_self_loops(edge_index,
                                                     edge_weight,
                                                     fill_value=-1,
                                                     num_nodes=num_nodes)
            edge_weight = kwargs['t'] * edge_weight
            mat = to_dense_adj(edge_index, edge_attr=edge_weight).squeeze()
            undirected = is_undirected(edge_index, edge_weight, num_nodes)
            diff_matrix = self.__expm__(mat, undirected)

        elif method == 'coeff':
            adj_matrix = to_dense_adj(edge_index,
                                      edge_attr=edge_weight).squeeze()
            mat = torch.eye(num_nodes, device=edge_index.device)

            diff_matrix = kwargs['coeffs'][0] * mat
            for coeff in kwargs['coeffs'][1:]:
                mat = mat @ adj_matrix
                diff_matrix += coeff * mat
        else:
            raise ValueError('Exact GDC diffusion {} unknown.'.format(method))

        return diff_matrix
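The :obj:`"ppr"` branch above densifies alpha * (I_n + (alpha - 1) * A)^-1. Below is a minimal, self-contained sketch of that same computation as a free function; the name ppr_diffusion_dense and the toy cycle graph are illustrative only, not part of the original class.

import torch
from torch_geometric.utils import add_self_loops, to_dense_adj

def ppr_diffusion_dense(edge_index, edge_weight, num_nodes, alpha=0.15):
    # Mirrors the 'ppr' branch: alpha * (I_n + (alpha - 1) * A)^-1
    edge_weight = (alpha - 1) * edge_weight
    edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                             fill_value=1., num_nodes=num_nodes)
    mat = to_dense_adj(edge_index, edge_attr=edge_weight,
                       max_num_nodes=num_nodes).squeeze(0)
    return alpha * torch.inverse(mat)

# Toy 3-node cycle with a row-stochastic transition matrix (weight 0.5 per edge).
edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],
                           [1, 0, 2, 1, 0, 2]])
edge_weight = torch.full((edge_index.size(1),), 0.5)
print(ppr_diffusion_dense(edge_index, edge_weight, num_nodes=3))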
Example #3
    def forward(self, x_1, x_2, edge_index_pos, edge_index_neg):
        """
        Forward propagation pass with features and indices.
        :param x_1: Features for left hand side vertices.
        :param x_2: Features for right hand side vertices.
        :param edge_index_pos: Positive indices.
        :param edge_index_neg: Negative indices.
        :return out: Abstract convolved features.
        """
        edge_index_pos, _ = remove_self_loops(edge_index_pos, None)
        edge_index_pos, _ = add_self_loops(edge_index_pos, num_nodes=x_1.size(0))
        edge_index_neg, _ = remove_self_loops(edge_index_neg, None)
        edge_index_neg, _ = add_self_loops(edge_index_neg, num_nodes=x_2.size(0))

        row_pos, col_pos = edge_index_pos
        row_neg, col_neg = edge_index_neg

        if self.norm:  # pos: [x_1: balanced features; x_2: unbalanced features]; neg: [x_1: unbalanced features; x_2: balanced features]
            out_1 = scatter_mean(x_1[col_pos], row_pos, dim=0, dim_size=x_1.size(0))
            out_2 = scatter_mean(x_2[col_neg], row_neg, dim=0, dim_size=x_2.size(0))
        else:
            out_1 = scatter_add(x_1[col_pos], row_pos, dim=0, dim_size=x_1.size(0))
            out_2 = scatter_add(x_2[col_neg], row_neg, dim=0, dim_size=x_2.size(0))

        out = torch.cat((out_1, out_2, x_1), 1)
        out = torch.matmul(out, self.weight)
        if self.bias is not None:
            out = out + self.bias

        if self.norm_embed:
            out = F.normalize(out, p=2, dim=-1)
        return out
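The aggregation above is plain neighborhood averaging (or summing) over the positive and negative edges once self-loops are in place. A tiny sketch of what the `self.norm` branch computes for the positive edges, with made-up features on a 3-node graph:

import torch
from torch_scatter import scatter_mean
from torch_geometric.utils import add_self_loops, remove_self_loops

x_1 = torch.tensor([[1.], [2.], [4.]])      # one scalar feature per node
edge_index_pos = torch.tensor([[0, 1],
                               [1, 2]])     # directed edges 0->1 and 1->2

edge_index_pos, _ = remove_self_loops(edge_index_pos)
edge_index_pos, _ = add_self_loops(edge_index_pos, num_nodes=x_1.size(0))

row_pos, col_pos = edge_index_pos
out_1 = scatter_mean(x_1[col_pos], row_pos, dim=0, dim_size=x_1.size(0))
print(out_1)  # [[1.5], [3.0], [4.0]]: per-node mean of neighbor features, self-loop included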
Example #4
def k_hop_edges(edge_index, num_nodes, K):
    # edge_index, _ = remove_self_loops(edge_index, None)
    n = num_nodes
    edge_index, _ = coalesce(edge_index, None, n, n)
    value = edge_index.new_ones((edge_index.size(1), ), dtype=torch.float)

    edges = []
    new_edge_index = edge_index.clone()
    new_edge_value = value.clone()
    useful_edge_index, edge_weight = add_self_loops(new_edge_index,
                                                    None,
                                                    fill_value=-1.,
                                                    num_nodes=num_nodes)
    edges.append(useful_edge_index)
    for i in range(K - 1):
        new_edge_index, new_edge_value = spspmm(new_edge_index, new_edge_value,
                                                edge_index, value, n, n, n)
        new_edge_index, new_edge_value = coalesce(new_edge_index,
                                                  new_edge_value, n, n)
        useful_edge_index, edge_weight = add_self_loops(new_edge_index,
                                                        None,
                                                        fill_value=-1.,
                                                        num_nodes=num_nodes)
        edges.append(useful_edge_index)

    # edge_index, _ = add_self_loops(edge_index, None,fill_value=-1.,num_nodes=num_nodes)
    return edges
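A hedged usage sketch for k_hop_edges on a small undirected path graph; it assumes the helper's own imports (coalesce and spspmm from torch_sparse, add_self_loops from torch_geometric.utils) are already in scope, and the toy graph is illustrative only. edges[k] then holds the edge set of the (k+1)-th power of the adjacency, with self-loops appended.

import torch

# Undirected 4-node path 0-1-2-3, both directions stored explicitly.
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])

hops = k_hop_edges(edge_index, num_nodes=4, K=2)
print(hops[0].size(1))  # 1-hop edges plus appended self-loops
print(hops[1].size(1))  # 2-hop reachability via spspmm, plus appended self-loops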
Example #5
def create_single_data(mesh_storage: MeshStorage):
    """
    create input data for the network. The data is wrapped by Data structure in pytorch-geometric library
    """

    mesh_data = mesh_storage.mesh_data

    # vertices
    v = np.concatenate((mesh_data.mesh_v, mesh_data.mesh_vn), axis=1)
    v = torch.from_numpy(v).float()
    # topology edges
    print("     gathering topological edges.")
    tpl_e = get_tpl_edges(mesh_data.mesh_v, mesh_data.mesh_f).T
    tpl_e = torch.from_numpy(tpl_e).long()
    tpl_e, _ = add_self_loops(tpl_e, num_nodes=v.size(0))
    # surface geodesic distance matrix
    print("     calculating surface geodesic matrix.")

    surface_geodesic = mesh_storage.surface_geodesic
    # geodesic edges
    print("     gathering geodesic edges.")
    geo_e = get_geo_edges(surface_geodesic, mesh_data.mesh_v).T
    geo_e = torch.from_numpy(geo_e).long()
    geo_e, _ = add_self_loops(geo_e, num_nodes=v.size(0))
    # batch
    batch = torch.zeros(len(v), dtype=torch.long)

    geo_data = Data(x=v[:, 3:6],
                    pos=v[:, 0:3],
                    tpl_edge_index=tpl_e,
                    geo_edge_index=geo_e,
                    batch=batch)
    return geo_data
Example #6
def create_single_data(mesh_filename):
    """
    Create input data for the network. The data is wrapped in the Data structure
    from the PyTorch Geometric library.
    :param mesh_filename: name of the input mesh
    :return: wrapped data, voxelized mesh, and geodesic distance matrix of all vertices
    """
    mesh = o3d.io.read_triangle_mesh(mesh_filename)
    mesh.compute_triangle_normals()
    mesh_v = np.asarray(mesh.vertices)
    mesh_vn = np.asarray(mesh.vertex_normals)
    mesh_f = np.asarray(mesh.triangles)

    mesh_v, translation_normalize, scale_normalize = normalize_obj(mesh_v)
    mesh_normalized = o3d.geometry.TriangleMesh(
        vertices=o3d.utility.Vector3dVector(mesh_v),
        triangles=o3d.utility.Vector3iVector(mesh_f))
    o3d.io.write_triangle_mesh(
        mesh_filename.replace("_remesh.obj", "_normalized.obj"),
        mesh_normalized)

    # vertices
    v = np.concatenate((mesh_v, mesh_vn), axis=1)
    v = torch.from_numpy(v).float()

    # topology edges
    print("     gathering topological edges.")
    tpl_e = get_tpl_edges(mesh_v, mesh_f).T
    tpl_e = torch.from_numpy(tpl_e).long()
    tpl_e, _ = add_self_loops(tpl_e, num_nodes=v.size(0))

    # surface geodesic distance matrix
    print("     calculating surface geodesic matrix.")
    surface_geodesic = calc_surface_geodesic(mesh)

    # geodesic edges
    print("     gathering geodesic edges.")
    geo_e = get_geo_edges(surface_geodesic, mesh_v).T
    geo_e = torch.from_numpy(geo_e).long()
    geo_e, _ = add_self_loops(geo_e, num_nodes=v.size(0))

    # batch
    batch = torch.zeros(len(v), dtype=torch.long)

    # voxel
    if not os.path.exists(
            mesh_filename.replace('_remesh.obj', '_normalized.binvox')):
        os.system("./binvox -d 88 -pb " +
                  mesh_filename.replace("_remesh.obj", "_normalized.obj"))
    with open(mesh_filename.replace('_remesh.obj', '_normalized.binvox'),
              'rb') as fvox:
        vox = binvox_rw.read_as_3d_array(fvox)

    data = Data(x=v[:, 3:6],
                pos=v[:, 0:3],
                tpl_edge_index=tpl_e,
                geo_edge_index=geo_e,
                batch=batch)
    return data, vox, surface_geodesic, translation_normalize, scale_normalize
Example #7
def test_add_self_loops():
    edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]])

    expected = [[0, 1, 0, 0, 1], [1, 0, 0, 0, 1]]
    assert add_self_loops(edge_index)[0].tolist() == expected

    edge_weight = torch.tensor([0.5, 0.5, 0.5])
    edge_index, edge_weight = add_self_loops(edge_index, edge_weight)
    assert edge_index.tolist() == expected
    assert edge_weight.tolist() == [0.5, 0.5, 0.5, 1, 1]
Example #8
    def process(self):
        data_list = []
        i = 0.0
        for v_filename in self.raw_paths:
            print('preprocessing data complete: {:.4f}%'.format(
                100 * i / len(self.raw_paths)))
            i += 1.0
            v = np.loadtxt(v_filename)
            m = np.loadtxt(v_filename.replace('_v.txt', '_attn.txt'))
            tpl_e = np.loadtxt(v_filename.replace('_v.txt', '_tpl_e.txt')).T
            geo_e = np.loadtxt(v_filename.replace('_v.txt', '_geo_e.txt')).T
            joints = np.loadtxt(v_filename.replace('_v.txt', '_j.txt'))
            adj = np.loadtxt(v_filename.replace('_v.txt', '_adj.txt'),
                             dtype=np.uint8)

            vox_file = v_filename.replace('_v.txt', '.binvox')
            with open(vox_file, 'rb') as fvox:
                vox = binvox_rw.read_as_3d_array(fvox)
            pairs = list(it.combinations(range(adj.shape[0]), 2))
            pair_attr = []
            for pr in pairs:
                dist = np.linalg.norm(joints[pr[0]] - joints[pr[1]])
                bone_samples = self.sample_on_bone(joints[pr[0]],
                                                   joints[pr[1]])
                bone_samples_inside, _ = self.inside_check(bone_samples, vox)
                outside_proportion = len(bone_samples_inside) / (
                    len(bone_samples) + 1e-10)
                attr = np.array([dist, outside_proportion, adj[pr[0], pr[1]]])
                pair_attr.append(attr)
            pairs = np.array(pairs)
            pair_attr = np.array(pair_attr)
            name = int(v_filename.split('/')[-1].split('_')[0])

            v = torch.from_numpy(v).float()
            m = torch.from_numpy(m).long()
            tpl_e = torch.from_numpy(tpl_e).long()
            geo_e = torch.from_numpy(geo_e).long()
            tpl_e, _ = add_self_loops(tpl_e, num_nodes=v.size(0))
            geo_e, _ = add_self_loops(geo_e, num_nodes=v.size(0))
            joints = torch.from_numpy(joints).float()
            pairs = torch.from_numpy(pairs).float()
            pair_attr = torch.from_numpy(pair_attr).float()
            data_list.append(
                SkeletonData(x=v[:, 3:6],
                             pos=v[:, 0:3],
                             name=name,
                             mask=m,
                             joints=joints,
                             tpl_edge_index=tpl_e,
                             geo_edge_index=geo_e,
                             pairs=pairs,
                             pair_attr=pair_attr))
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
Example #9
def test_add_self_loops():
    row = torch.tensor([0, 1, 0])
    col = torch.tensor([1, 0, 0])
    edge_index = torch.stack([row, col], dim=0)

    expected = [[0, 1, 0, 0, 1], [1, 0, 0, 0, 1]]
    assert add_self_loops(edge_index)[0].tolist() == expected

    edge_weight = torch.Tensor([0.5, 0.5, 0.5])
    edge_index, edge_weight = add_self_loops(edge_index, edge_weight)
    assert edge_index.tolist() == expected
    assert edge_weight.tolist() == [0.5, 0.5, 0.5, 1, 1]
Example #10
    def forward(self, nodefeatures_1, nodefeatures_2, edge_index_pos,
                edge_index_neg):
        """
		:param nodefeatures_1:
		:param nodefeatures_2:
		in the first block, the nodefeatures_1 is the balanced feature matrix, the nodefeatures_2 is the unbalanced
		feature matrix.
		in the first block, the nodefeatures_1 is the unbalanced feature matrix, the nodefeatures_2 is the balanced
		feature matrix.
		:param edge_index_pos: it is the positive edge index in the before layer
		:param edge_index_neg: it is the negative edge index in the before layer
		:return:
		"""
        edge_index_pos, _ = remove_self_loops(edge_index_pos, None)
        # whether we should add some self loops to the node
        edge_index_pos, _ = add_self_loops(edge_index_pos, None)
        edge_index_neg, _ = remove_self_loops(edge_index_neg, None)
        # whether we should add some self loops to the node
        edge_index_neg, _ = add_self_loops(edge_index_neg, None)
        # edge_index_pos holds each node's positive-edge neighbors
        row_pos, col_pos = edge_index_pos
        row_neg, col_neg = edge_index_neg

        if self.norm:
            out_1 = scatter_mean(nodefeatures_1[col_pos],
                                 row_pos,
                                 dim=0,
                                 dim_size=nodefeatures_1.size(0))
            out_2 = scatter_mean(nodefeatures_2[col_neg],
                                 row_neg,
                                 dim=0,
                                 dim_size=nodefeatures_2.size(0))
        else:
            out_1 = scatter_add(nodefeatures_1[col_pos],
                                row_pos,
                                dim=0,
                                dim_size=nodefeatures_1.size(0))
            out_2 = scatter_add(nodefeatures_2[col_neg],
                                row_neg,
                                dim=0,
                                dim_size=nodefeatures_2.size(0))
        # now we have the dimension of the input features of node_feature * 3.
        out = torch.cat((out_1, out_2, nodefeatures_1), dim=1)
        out = out.mm(self.weight)

        if self.bias is not None:
            out = out + self.bias
        if self.norm_embed:
            out = F.normalize(out, p=2, dim=-1)
        return out
Example #11
def call(data, name, num_features, num_classes):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data.edge_index, _ = remove_self_loops(data.edge_index)
    data.edge_index = add_self_loops(data.edge_index, num_nodes=data.x.size(0))
    data = data.to(device)
    model = Net(data, num_features, num_classes).to(device)
    return model, data
Example #12
    def forward(self, x, edge_index):
        """"""
        edge_index, _ = remove_self_loops(edge_index)
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        # prepare
        x = torch.mm(x, self.weight).view(-1, self.heads, self.out_channels)
        return self.propagate(edge_index, x=x, num_nodes=x.size(0))
Example #13
    def norm(edge_index, num_nodes, edge_weight, dtype=None):
        edge_index, _ = remove_self_loops(edge_index)

        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1), ),
                                     dtype=dtype,
                                     device=edge_index.device)

        row, col = edge_index
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0

        edge_index, edge_weight = add_self_loops(edge_index, edge_weight, 0,
                                                 num_nodes)

        row, col = edge_index
        expand_deg = torch.zeros((edge_weight.size(0), ),
                                 dtype=dtype,
                                 device=edge_index.device)
        expand_deg[-num_nodes:] = torch.ones((num_nodes, ),
                                             dtype=dtype,
                                             device=edge_index.device)

        return edge_index, expand_deg - deg_inv_sqrt[
            row] * edge_weight * deg_inv_sqrt[col]
Example #14
def test_attr(model, data):
    model.eval()
    accs = []
    m = ['train_mask', 'val_mask', 'test_mask']
    i = 0
    for _, mask in data('train_mask', 'val_mask', 'test_mask'):

        if m[i] == 'train_mask':
            x, pos_edge_index = data.x, data.train_pos_edge_index

            _edge_index, _ = remove_self_loops(pos_edge_index)
            pos_edge_index_with_self_loops, _ = add_self_loops(_edge_index,
                                                               num_nodes=x.size(0))

            neg_edge_index = negative_sampling(
                edge_index=pos_edge_index_with_self_loops, num_nodes=x.size(0),
                num_neg_samples=pos_edge_index.size(1))
        else:
            pos_edge_index, neg_edge_index = [
                index for _, index in data("{}_pos_edge_index".format(m[i].split("_")[0]),
                                           "{}_neg_edge_index".format(m[i].split("_")[0]))
            ]
        neg_edge_index = neg_edge_index.to(pos_edge_index.device)
        _, logits, _, _ = model(pos_edge_index, neg_edge_index)

        pred = logits[mask].max(1)[1]

        macro = f1_score((data.y[mask]).cpu().numpy(), pred.cpu().numpy(), average='macro')
        accs.append(macro)

        i += 1
    return accs
Example #15
    def norm(edge_index, num_nodes, edge_weight, improved=False, dtype=None):
        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1), ),
                                     dtype=dtype,
                                     device=edge_index.device)
        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_index.size(1)

        edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)
        edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
        #pdb.set_trace()
        loop_weight = torch.full((num_nodes, ),
                                 1 if not improved else 2,
                                 dtype=edge_weight.dtype,
                                 device=edge_weight.device)
        edge_weight = torch.cat([edge_weight, loop_weight], dim=0)
        #pdb.set_trace()

        row, col = edge_index

        deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
        deg_inv = deg.pow(-1)
        deg_inv[deg_inv == float('inf')] = 0

        # Normalize each edge weight by the in-degree of its target node
        # (random-walk / mean aggregation).
        return edge_index, deg_inv[col] * edge_weight
Example #16
    def forward(self, x: Tensor, edge_index: Adj) -> Tensor:
        """"""
        edge_weight: OptTensor = None
        if isinstance(edge_index, Tensor):
            num_nodes = x.size(self.node_dim)
            if self.add_self_loops:
                edge_index, _ = remove_self_loops(edge_index)
                edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)

            row, col = edge_index[0], edge_index[1]
            deg_inv = 1. / degree(col, num_nodes=num_nodes).clamp_(1.)

            edge_weight = deg_inv[col]
            edge_weight[row == col] += self.diag_lambda * deg_inv

        elif isinstance(edge_index, SparseTensor):
            if self.add_self_loops:
                edge_index = set_diag(edge_index)

            col, row, _ = edge_index.coo()  # Transposed.
            deg_inv = 1. / sparsesum(edge_index, dim=1).clamp_(1.)

            edge_weight = deg_inv[col]
            edge_weight[row == col] += self.diag_lambda * deg_inv
            edge_index = edge_index.set_value(edge_weight, layout='coo')

        # propagate_type: (x: Tensor, edge_weight: OptTensor)
        out = self.propagate(edge_index,
                             x=x,
                             edge_weight=edge_weight,
                             size=None)
        out = self.lin_out(out) + self.lin_root(x)

        return out
Example #17
    def norm(edge_index,
             num_nodes,
             edge_weight,
             normalization,
             lambda_max,
             dtype=None,
             batch=None):

        edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)

        edge_index, edge_weight = get_laplacian(edge_index, edge_weight,
                                                normalization, dtype,
                                                num_nodes)

        if batch is not None and torch.is_tensor(lambda_max):
            lambda_max = lambda_max[batch[edge_index[0]]]

        edge_weight = (2.0 * edge_weight) / lambda_max
        edge_weight[edge_weight == float('inf')] = 0

        edge_index, edge_weight = add_self_loops(edge_index,
                                                 edge_weight,
                                                 fill_value=-1,
                                                 num_nodes=num_nodes)

        return edge_index, edge_weight
Example #18
    def __norm__(self,
                 edge_index,
                 num_nodes: Optional[int],
                 edge_weight: OptTensor,
                 normalization: Optional[str],
                 lambda_max,
                 dtype: Optional[int] = None,
                 batch: OptTensor = None):

        edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)

        edge_index, edge_weight = get_laplacian(edge_index, edge_weight,
                                                normalization, dtype,
                                                num_nodes)

        if batch is not None and lambda_max.numel() > 1:
            lambda_max = lambda_max[batch[edge_index[0]]]

        edge_weight = (2.0 * edge_weight) / lambda_max
        edge_weight.masked_fill_(edge_weight == float('inf'), 0)

        edge_index, edge_weight = add_self_loops(edge_index,
                                                 edge_weight,
                                                 fill_value=-1.,
                                                 num_nodes=num_nodes)
        assert edge_weight is not None

        return edge_index, edge_weight
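Both norm variants above build the rescaled Laplacian 2 * L / lambda_max - I; the final add_self_loops call with fill_value=-1. is what subtracts the identity. A small standalone sketch of the same rescaling on a 3-node cycle, using symmetric normalization and the common bound lambda_max = 2 (the toy graph and constants are illustrative only):

import torch
from torch_geometric.utils import add_self_loops, get_laplacian, remove_self_loops

edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],
                           [1, 0, 2, 1, 0, 2]])
num_nodes, lambda_max = 3, 2.0

edge_index, edge_weight = remove_self_loops(edge_index, None)
edge_index, edge_weight = get_laplacian(edge_index, edge_weight,
                                        normalization='sym',
                                        num_nodes=num_nodes)
edge_weight = (2.0 * edge_weight) / lambda_max
edge_index, edge_weight = add_self_loops(edge_index, edge_weight,
                                         fill_value=-1., num_nodes=num_nodes)
# edge_index/edge_weight now parametrize 2 * L_sym / lambda_max - I,
# whose eigenvalues lie in [-1, 1] as expected by Chebyshev-polynomial filters.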
Example #19
    def forward(self, x, edge_index, edge_attr, params, param_name_dict, size=None):
        self.att = get_param(params, param_name_dict, "att")
        # self.edge_update = params[self.get_param_id(param_name_dict, 'edge_update')]
        self.bias = None
        if self.use_bias:
            self.bias = get_param(params, param_name_dict, "bias")
        if size is None and torch.is_tensor(x):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        # get gru params
        self.gru_weight_ih = get_param(params, param_name_dict, "gru_w_ih")
        self.gru_weight_hh = get_param(params, param_name_dict, "gru_w_hh")
        self.gru_bias_ih = get_param(params, param_name_dict, "gru_b_ih")
        self.gru_bias_hh = get_param(params, param_name_dict, "gru_b_hh")
        self.gru_hx = x

        # Note: we need to add blank edge attributes for the self-loops added
        # above, so that edge_attr stays aligned with edge_index
        self_loop_edges = torch.zeros(x.size(0), edge_attr.size(1)).to(
            edge_index.device
        )
        edge_attr = torch.cat([edge_attr, self_loop_edges], dim=0)
        weight = get_param(params, param_name_dict, "weight")
        if torch.is_tensor(x):
            x = torch.matmul(x, weight)
        else:
            x = (
                None if x[0] is None else torch.matmul(x[0], weight),
                None if x[1] is None else torch.matmul(x[1], weight),
            )
        return self.propagate(
            edge_index, size=size, x=x, num_nodes=x.size(0), edge_attr=edge_attr
        )
Example #20
    def forward(self, x: Union[OptTensor, PairOptTensor],
                pos: Union[Tensor, PairTensor], edge_index: Adj) -> Tensor:
        """"""
        if not isinstance(x, tuple):
            x: PairOptTensor = (x, None)

        if isinstance(pos, Tensor):
            pos: PairTensor = (pos, pos)

        if self.add_self_loops:
            if isinstance(edge_index, Tensor):
                edge_index, _ = remove_self_loops(edge_index)
                edge_index, _ = add_self_loops(edge_index,
                                               num_nodes=min(
                                                   pos[0].size(0),
                                                   pos[1].size(0)))
            elif isinstance(edge_index, SparseTensor):
                edge_index = set_diag(edge_index)

        # propagate_type: (x: PairOptTensor, pos: PairTensor)
        out = self.propagate(edge_index, x=x, pos=pos, size=None)

        if self.global_nn is not None:
            out = self.global_nn(out)

        return out
Example #21
    def forward(self, x, edge_index, edge_attr, params, param_name_dict, size=None):
        self.att = get_param(params, param_name_dict, "att")
        self.edge_update = get_param(params, param_name_dict, "edge_update")
        self.bias = None
        if self.use_bias:
            self.bias = get_param(params, param_name_dict, "bias")
        if size is None and torch.is_tensor(x):
            edge_index, _ = remove_self_loops(edge_index)
            edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        # edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
        self_loop_edges = torch.zeros(x.size(0), edge_attr.size(1)).to(
            edge_index.device
        )
        edge_attr = torch.cat([edge_attr, self_loop_edges], dim=0)  # (500, 10)
        # Note: we need to add blank edge attributes for self loops
        weight = get_param(params, param_name_dict, "weight")
        if torch.is_tensor(x):
            x = torch.matmul(x, weight)
        else:
            x = (
                None if x[0] is None else torch.matmul(x[0], weight),
                None if x[1] is None else torch.matmul(x[1], weight),
            )
        # x = x.view(-1, self.heads, self.out_channels)
        # x = torch.mm(x, weight).view(-1, self.heads, self.out_channels)
        return self.propagate(
            edge_index, size=size, x=x, num_nodes=x.size(0), edge_attr=edge_attr
        )
Example #22
def test_add_self_loops():
    row = torch.LongTensor([0, 1, 0])
    col = torch.LongTensor([1, 0, 0])
    expected_output = [[0, 1, 0, 0, 1], [1, 0, 0, 0, 1]]

    output = add_self_loops(torch.stack([row, col], dim=0))
    assert output.tolist() == expected_output
Example #23
def call(data, name, num_features, num_classes):
    filename = '../data/curvature/graph_' + name + '.edge_list'
    with open(filename) as f:
        cur_list = list(f)
    if name == 'Cora' or name == 'CS':
        ricci_cur = [[] for i in range(len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
    else:
        ricci_cur = [[] for i in range(2 * len(cur_list))]
        for i in range(len(cur_list)):
            ricci_cur[i] = [num(s) for s in cur_list[i].split(' ', 2)]
            ricci_cur[i + len(cur_list)] = [
                ricci_cur[i][1], ricci_cur[i][0], ricci_cur[i][2]
            ]
    ricci_cur = sorted(ricci_cur)
    w_mul = [i[2] for i in ricci_cur]
    #w_mul=[(i[2]+1)/2 for i in ricci_cur]
    w_mul = w_mul + [0 for i in range(data.x.size(0))]
    w_mul = torch.tensor(w_mul, dtype=torch.float)
    data.edge_index, _ = remove_self_loops(data.edge_index)
    data.edge_index, _ = add_self_loops(data.edge_index,
                                        num_nodes=data.x.size(0))
    data.w_mul = w_mul.view(-1, 1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    #change to call function
    data.w_mul = data.w_mul.to(device)
    model, data = Net(data, num_features, num_classes,
                      data.w_mul).to(device), data.to(device)
    return model, data
Example #24
def main():

    args = ArgsInit().args

    dataset = PygNodePropPredDataset(name=args.dataset)
    graph = dataset[0]

    if args.self_loop:
        graph.edge_index = add_self_loops(edge_index=graph.edge_index,
                                          num_nodes=graph.num_nodes)[0]
    split_idx = dataset.get_idx_split()

    evaluator = Evaluator(args.dataset)

    args.in_channels = graph.x.size(-1)
    args.num_tasks = dataset.num_classes

    print(args)

    model = DeeperGCN(args)

    print(model)

    model.load_state_dict(torch.load(args.model_load_path)['model_state_dict'])
    result = test(model, graph.x, graph.edge_index, graph.y, split_idx,
                  evaluator)
    print(result)
    model.print_params(final=True)
Example #25
    def forward(
        self,
        x: Union[Tensor, PairTensor],
        pos: Union[Tensor, PairTensor],
        edge_index: Adj,
    ) -> Tensor:
        """"""
        if isinstance(x, Tensor):
            alpha = (self.lin_src(x), self.lin_dst(x))
            x: PairTensor = (self.lin(x), x)
        else:
            alpha = (self.lin_src(x[0]), self.lin_dst(x[1]))
            x = (self.lin(x[0]), x[1])

        if isinstance(pos, Tensor):
            pos: PairTensor = (pos, pos)

        if self.add_self_loops:
            if isinstance(edge_index, Tensor):
                edge_index, _ = remove_self_loops(edge_index)
                edge_index, _ = add_self_loops(edge_index,
                                               num_nodes=min(
                                                   pos[0].size(0),
                                                   pos[1].size(0)))
            elif isinstance(edge_index, SparseTensor):
                edge_index = set_diag(edge_index)

        # propagate_type: (x: PairTensor, pos: PairTensor, alpha: PairTensor)
        out = self.propagate(edge_index, x=x, pos=pos, alpha=alpha, size=None)
        return out
Example #26
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1), ),
                                     dtype=x.dtype,
                                     device=x.device)
        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_index.size(1)

        edge_index = add_self_loops(edge_index, num_nodes=x.size(0))
        loop_weight = torch.full((x.size(0), ),
                                 1 if not self.improved else 2,
                                 dtype=x.dtype,
                                 device=x.device)
        edge_weight = torch.cat([edge_weight, loop_weight], dim=0)

        row, col = edge_index
        deg = scatter_add(edge_weight, row, dim=0, dim_size=x.size(0))
        deg_inv = deg.pow(-0.5)
        deg_inv[deg_inv == float('inf')] = 0

        norm = deg_inv[row] * edge_weight * deg_inv[col]

        x = torch.matmul(x, self.weight)
        return self.propagate('add', edge_index, x=x, norm=norm)
Example #27
    def forward(self, x, edge_index, edge_attr):
        #add self loops in the edge space
        edge_index = add_self_loops(edge_index, num_nodes=x.size(0))

        #add features corresponding to self-loop edges.
        self_loop_attr = torch.zeros(x.size(0), 9)
        self_loop_attr[:, 7] = 1  # attribute for self-loop edge
        self_loop_attr = self_loop_attr.to(edge_attr.device).to(
            edge_attr.dtype)
        edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)

        edge_embeddings = self.edge_encoder(edge_attr)

        if self.input_layer:
            x = self.input_node_embeddings(x.to(torch.int64).view(-1, ))

        norm = self.norm(edge_index, x.size(0), x.dtype)

        x = self.linear(x)

        return self.propagate(self.aggr,
                              edge_index,
                              x=x,
                              edge_attr=edge_embeddings,
                              norm=norm)
Example #28
    def forward(self, x, edge_neighbors, edge_weight=None):
        """
            needs to be changed to have:
            - edge_neighborhoods
            - xedge as features for edges
            - aggregate function 

        """
        if edge_weight is None:
            edge_weight = torch.ones((edge_neighbors.size(1), ),
                                     dtype=x.dtype,
                                     device=x.device)
        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_neighbors.size(1)

        edge_neighbors = add_self_loops(edge_neighbors, num_nodes=x.size(0))
        loop_weight = torch.full((x.size(0), ),
                                 1 if not self.improved else 2,
                                 dtype=x.dtype,
                                 device=x.device)
        edge_weight = torch.cat([edge_weight, loop_weight], dim=0)

        row, col = edge_neighbors
        deg = scatter_add(edge_weight, row, dim=0, dim_size=x.size(0))
        deg_inv = deg.pow(-0.5)
        deg_inv[deg_inv == float('inf')] = 0

        norm = deg_inv[row] * edge_weight * deg_inv[col]

        x = torch.matmul(x, self.weight)
        return self.propagate('add', edge_neighbors, x=x, norm=norm)
Example #29
    def gcn_norm(self,
                 edge_index,
                 edge_weight=None,
                 num_nodes=None,
                 improved=False,
                 dtype=None):
        fill_value = 2. if improved else 1.

        num_nodes = maybe_num_nodes(edge_index, num_nodes)

        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1), ),
                                     dtype=dtype,
                                     device=edge_index.device)

        edge_index, tmp_edge_weight = add_self_loops(edge_index, edge_weight,
                                                     fill_value, num_nodes)
        assert tmp_edge_weight is not None
        edge_weight = tmp_edge_weight

        row, col = edge_index[0], edge_index[1]
        deg = scatter_add(edge_weight, col, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow_(-0.5)
        deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
        return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
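gcn_norm above returns the edge weights of the symmetrically normalized adjacency with self-loops, i.e. D^{-1/2}(A + I)D^{-1/2} for an unweighted, undirected graph. A quick dense illustration of the same quantity on a toy path graph (not calling the method itself, since it needs a class instance; the graph is illustrative only):

import torch
from torch_geometric.utils import add_self_loops, to_dense_adj

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
num_nodes = 3

edge_index_sl, _ = add_self_loops(edge_index, num_nodes=num_nodes)
adj = to_dense_adj(edge_index_sl, max_num_nodes=num_nodes).squeeze(0)  # A + I
deg_inv_sqrt = adj.sum(dim=1).pow(-0.5)
dense_norm = deg_inv_sqrt.view(-1, 1) * adj * deg_inv_sqrt.view(1, -1)
print(dense_norm)  # nonzero entries equal the per-edge weights gcn_norm returns for this graph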
Example #30
    def get_batch(self, splt: str) -> Tuple[torch.Tensor, torch.Tensor]:
        from torch_geometric.utils import (negative_sampling,
                                           remove_self_loops, add_self_loops)
        n = self.x.shape[0]

        if splt == 'train':
            pos_edge_index = self.train_edge_index
            num_neg_edges = pos_edge_index.shape[1]

            pos_edge_clean, _ = remove_self_loops(pos_edge_index)
            pos_edge_w_self_loop, _ = add_self_loops(pos_edge_clean,
                                                     num_nodes=n)

            neg_edge_index = negative_sampling(edge_index=pos_edge_w_self_loop,
                                               num_nodes=n,
                                               num_neg_samples=num_neg_edges)
        elif splt == 'val':
            pos_edge_index, neg_edge_index = self.val_edge_index
        elif splt == 'test':
            pos_edge_index, neg_edge_index = self.test_edge_index
        else:
            raise ValueError(f'Unknown splt: {splt}')

        query_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
        link_y = torch.zeros_like(query_edge_index[0], dtype=torch.float)
        link_y[:pos_edge_index.shape[1]] = 1

        return query_edge_index, link_y
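In the 'train' branch above, self-loops are removed and then re-added before calling negative_sampling, so the sampler never proposes self-loops or existing positive edges; one negative is drawn per positive. A small standalone sketch of that recipe (the toy edge index is illustrative only):

import torch
from torch_geometric.utils import (negative_sampling, remove_self_loops,
                                   add_self_loops)

num_nodes = 4
pos_edge_index = torch.tensor([[0, 1, 2],
                               [1, 2, 3]])

pos_clean, _ = remove_self_loops(pos_edge_index)
pos_with_loops, _ = add_self_loops(pos_clean, num_nodes=num_nodes)

neg_edge_index = negative_sampling(edge_index=pos_with_loops,
                                   num_nodes=num_nodes,
                                   num_neg_samples=pos_edge_index.size(1))

query_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
link_y = torch.zeros(query_edge_index.size(1))
link_y[:pos_edge_index.size(1)] = 1.  # 1 = positive edge, 0 = sampled negative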