def asarray(x: Any, dtype: Optional[str] = None) -> np.ndarray:
    """Convert `x` to an integer Numpy array, or cast a torch Tensor.

    Parameters
    ----------
    x: torch Tensor, Numpy array-like, or integer scalar.
    dtype: str, optional.
        name of the target integer dtype; defaults to `'int64'`.

    Returns
    -------
    A torch Tensor cast to `dtype` when `x` is a tensor, otherwise a
    Numpy array with dtype `dtype` (default `'int64'`).

    Raises
    ------
    ValueError
        if `x` is neither array-like nor an integer scalar.
    """
    if dtype is None:
        dtype = 'int64'

    if torch.is_tensor(x):
        # BUG FIX: the original compared `x.dtype` (a torch.dtype) with
        # the *string* `dtype`, which is always unequal — the check was
        # dead code. Resolve the string to a torch.dtype first.
        target = getattr(torch, dtype)
        if x.dtype != target:
            return x.to(target)
        else:
            return x

    if gg.is_intscalar(x):
        # wrap scalars so the result is always at least 1-D
        x = np.asarray([x], dtype=dtype)
    elif gg.is_listlike(x) or (isinstance(x, np.ndarray) and x.dtype != "O"):
        x = np.asarray(x, dtype=dtype)
    else:
        raise ValueError(
            f"Invalid input which should be either array-like or integer scalar, but got {type(x)}."
        )
    return x
def normalize_adj(adj_matrix, rate=-0.5, fill_weight=1.0, symmetric=True):
    """Normalize adjacency matrix by degree scaling.

    >>> normalize_adj(adj, rate=-0.5)  # a single normalized adjacency matrix
    # a tuple of normalized adjacency matrices, one per rate
    >>> normalize_adj(adj, rate=[-0.5, 1.0])

    Parameters
    ----------
    adj_matrix: Scipy matrix or Numpy array or a list of them
        Single or a list of Scipy sparse matrices or Numpy arrays.
    rate: Single or a list of float scale, optional.
        the normalize rate for `adj_matrix`.
    fill_weight: float scalar, optional.
        weight of self loops for the adjacency matrix.
    symmetric: bool, optional
        whether to use symmetrical normalization

    Returns
    ----------
    Single or a list of Scipy sparse matrix or Numpy matrices.

    See also
    ----------
    graphgallery.functional.NormalizeAdj
    """

    def _norm(mat, exponent):
        # adding weighted self loops also produces a fresh copy of `mat`
        if fill_weight:
            mat = mat + fill_weight * sp.eye(
                mat.shape[0], dtype=mat.dtype, format='csr')
        else:
            mat = mat.copy()

        if exponent is None:
            return mat

        deg = np.ravel(mat.sum(1))
        scale = np.power(deg, exponent)

        if not sp.isspmatrix(mat):
            # dense path: multiply by a diagonal scaling matrix
            diag = sp.diags(scale)
            mat = diag @ mat
            return mat @ diag if symmetric else mat

        # sparse path: scale the stored entries directly in COO form
        coo = mat.tocoo(copy=False)
        coo.data = scale[coo.row] * coo.data
        if symmetric:
            coo.data *= scale[coo.col]
        return coo.tocsr(copy=False)

    if gg.is_listlike(rate):
        return tuple(_norm(adj_matrix, r) for r in rate)
    return _norm(adj_matrix, rate)
def astensor(x, *, dtype=None, device=None, escape=None):
    """Convert `x` to a PyTorch-backend tensor (or backend graph object).

    Parameters
    ----------
    x: torch Tensor, TensorFlow tensor, Scipy sparse matrix, Numpy
        array/matrix, list-like, scalar, or None.
    dtype: torch.dtype, np.dtype or str, optional.
        target dtype; inferred via `gf.infer_type` when None.
    device: optional target device, forwarded to `Tensor.to`.
    escape: a type or tuple of types, optional.
        instances of these types are returned unchanged.

    Returns
    -------
    A tensor on `device`; for Scipy sparse input the result depends on
    the active backend: a DGL graph ("dgl_torch"), an
    `(edge_index, edge_weight)` tensor pair ("pyg"), or a sparse tensor.

    Raises
    ------
    TypeError
        if `escape` is not a type/tuple of types, `dtype` is of an
        unsupported kind, or `x` cannot be converted.
    """
    try:
        # `escape` lets callers opt specific types out of conversion;
        # isinstance raises TypeError when `escape` is not a valid type.
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        raise TypeError(f"argument 'escape' must be a type or tuple of types.")
    if dtype is None:
        dtype = gf.infer_type(x)
    if isinstance(dtype, (np.dtype, str)):
        # presumably maps dtype names (e.g. 'float32') to torch dtypes;
        # unknown names fall through unchanged — TODO confirm
        dtype = data_type_dict().get(str(dtype), dtype)
    elif not isinstance(dtype, torch.dtype):
        raise TypeError(
            f"argument 'dtype' must be torch.dtype, np.dtype or str, but got {type(dtype)}."
        )
    if is_tensor(x):
        tensor = x.to(dtype)
    elif gf.is_tensor(x, backend='tensorflow'):
        # cross-backend input: round-trip through Numpy, then retry
        return astensor(gf.tensoras(x), dtype=dtype, device=device,
                        escape=escape)
    elif sp.isspmatrix(x):
        if gg.backend() == "dgl_torch":
            import dgl
            tensor = dgl.from_scipy(x, idtype=getattr(torch, gg.intx()))
        elif gg.backend() == "pyg":
            # PyG represents a sparse adjacency as an edge-index /
            # edge-weight pair rather than one tensor.
            edge_index, edge_weight = gf.sparse_adj_to_edge(x)
            return (astensor(edge_index, dtype=gg.intx(), device=device,
                             escape=escape),
                    astensor(edge_weight, dtype=gg.floatx(), device=device,
                             escape=escape))
        else:
            tensor = sparse_adj_to_sparse_tensor(x, dtype=dtype)
    elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
              gg.is_scalar(x))):
        tensor = torch.tensor(x, dtype=dtype, device=device)
    else:
        raise TypeError(
            f"Invalid type of inputs. Allowed data type (Tensor, SparseTensor, Numpy array, Scipy sparse tensor, None), but got {type(x)}."
        )
    return tensor.to(device)
def get(
        transform: Union[str, Transform, None, List, Tuple, "Compose"]) -> Transform:
    """Resolve `transform` into a `Transform` instance.

    List-like inputs are composed into one transform; `Transform`
    instances and callables pass through unchanged; `None` maps to
    `NullTransform`; anything else is looked up case-insensitively in
    the `_TRANSFORMS` registry by its string form.
    """
    if gg.is_listlike(transform):
        # a sequence of transforms -> compose into a single one
        return Compose(*transform)
    if isinstance(transform, Transform) or callable(transform):
        return transform
    elif transform is None:
        return NullTransform()
    key = str(transform).lower()
    factory = _TRANSFORMS.get(key, None)
    if factory is None:
        raise ValueError(
            f"Unknown transform: '{transform}', expected a string, callable function or None."
        )
    return factory()
def get(transform):
    """Resolve `transform` into a transform instance.

    Accepts a list of transforms (composed), a `BaseTransform` or
    callable (returned unchanged), `None` (`NullTransform`), a string
    name, or a `(name, parameter_dict)` tuple whose dict is forwarded
    as keyword arguments to the registered transform class.
    """
    if gg.is_listlike(transform) and not is_name_dict_tuple(transform):
        return Compose(*transform)
    if isinstance(transform, BaseTransform) or callable(transform):
        return transform
    elif transform is None:
        return NullTransform()

    # a `(name, parameters)` pair carries constructor keyword arguments
    paras = {}
    if isinstance(transform, tuple):
        transform, paras = transform

    requested = transform
    assert isinstance(transform, str), transform
    if transform not in Transform:
        # retry with snake_case converted to CamelCase
        transform = "".join(s.title() for s in transform.split("_"))
        if transform not in Transform:
            raise ValueError(f"transform not found `{requested}`.")
    return Transform.get(transform)(**paras)
def get(
        transform: Union[str, Transform, None, List, Tuple, "Compose"]) -> Transform:
    """Resolve `transform` into a `Transform` instance.

    Accepts a list of transforms (composed), a `Transform` or callable
    (returned unchanged), `None` (`NullTransform`), a string name, or a
    `(name, parameter_dict)` tuple whose dict is forwarded as keyword
    arguments to the registered transform class.

    Raises
    ------
    ValueError
        if the (possibly CamelCased) name is not registered.
    """
    if gg.is_listlike(transform) and not name_dict_tuple(transform):
        return Compose(*transform)
    if isinstance(transform, Transform) or callable(transform):
        return transform
    elif transform is None:
        return NullTransform()

    # a `(name, parameters)` pair carries constructor keyword arguments
    transform_para = {}
    if isinstance(transform, tuple):
        transform, transform_para = transform

    original_transform = transform
    assert isinstance(transform, str), transform
    if transform not in Transformers:
        # retry with snake_case converted to CamelCase
        transform = "".join(map(lambda s: s.title(), transform.split("_")))
        if transform not in Transformers:
            # BUG FIX: previously fell through, so `Transformers.get(...)`
            # returned None and calling it raised an opaque
            # "'NoneType' object is not callable" TypeError.
            raise ValueError(f"transform not found `{original_transform}`.")
    return Transformers.get(transform)(**transform_para)
def astensor(x, *, dtype=None, device=None, escape=None):
    """Convert `x` to a TensorFlow-backend tensor (or backend graph object).

    Parameters
    ----------
    x: tf Tensor, torch tensor, Scipy sparse matrix, Numpy array/matrix,
        list-like, scalar, or None.
    dtype: tf.dtypes.DType, np.dtype or str, optional.
        target dtype; inferred via `gf.infer_type` when None. Normalized
        to a dtype *name* string before use.
    device: optional device placed via `tf.device`.
    escape: a type or tuple of types, optional.
        instances of these types are returned unchanged.

    Returns
    -------
    A TensorFlow tensor on `device`; for Scipy sparse input the result
    depends on the active backend: a DGL graph ("dgl_tf") or a sparse
    tensor otherwise.

    Raises
    ------
    TypeError
        if `escape` is not a type/tuple of types, `dtype` is of an
        unsupported kind, or `x` cannot be converted.
    """
    try:
        # `escape` lets callers opt specific types out of conversion;
        # isinstance raises TypeError when `escape` is not a valid type.
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        raise TypeError(f"argument 'escape' must be a type or tuple of types.")
    if dtype is None:
        dtype = gf.infer_type(x)
    elif isinstance(dtype, tf.dtypes.DType):
        # normalize to the dtype's string name for the comparisons below
        dtype = dtype.name
    elif isinstance(dtype, (np.dtype, str)):
        dtype = str(dtype)
    else:
        raise TypeError(
            f"argument 'dtype' must be tf.dtypes.DType, np.dtype or str, but got {type(dtype)}."
        )
    # all created tensors are pinned to `device`
    with tf.device(device):
        if is_tensor(x):
            if x.dtype != dtype:
                return tf.cast(x, dtype=dtype)
            # identity keeps the tensor but applies the device placement
            return tf.identity(x)
        elif gf.is_tensor(x, backend='torch'):
            # cross-backend input: round-trip through Numpy, then retry
            return astensor(gf.tensoras(x), dtype=dtype, device=device,
                            escape=escape)
        elif sp.isspmatrix(x):
            if gg.backend() == "dgl_tf":
                import dgl
                return dgl.from_scipy(x,
                                      idtype=getattr(tf, gg.intx())).to(device)
            else:
                return sparse_adj_to_sparse_tensor(x, dtype=dtype)
        elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
                  gg.is_scalar(x))):
            return tf.convert_to_tensor(x, dtype=dtype)
        else:
            raise TypeError(
                f"Invalid type of inputs. Allowed data type(Tensor, SparseTensor, Numpy array, Scipy sparse matrix, None), but got {type(x)}."
            )
def add_selfloops(adj_matrix: sp.csr_matrix, fill_weight: float = 1.0):
    """Add self loops with weight `fill_weight` to adjacency matrix.

    Existing diagonal entries are removed first, so the resulting
    diagonal is exactly `fill_weight` (or zero when `fill_weight` is
    falsy, i.e. the original diagonal is simply dropped).

    >>> add_selfloops(adj, fill_weight=1.0)  # return an adjacency matrix with self loops
    # return a list of adjacency matrices with different self-loop weights
    >>> add_selfloops(adj, fill_weight=[1.0, 2.0])

    Parameters
    ----------
    adj_matrix: Scipy matrix or Numpy array or a list of them
        Single or a list of Scipy sparse matrices or Numpy arrays.
    fill_weight: float scalar, optional.
        weight of self loops for the adjacency matrix.

    Returns
    -------
    Single or a tuple of Scipy sparse matrices or Numpy matrices.

    See also
    ----------
    graphgallery.functional.AddSelfLoops
    """

    def _add_selfloops(adj, w):
        # strip any existing diagonal before adding the new self loops
        if sp.issparse(adj):
            adj = adj - sp.diags(adj.diagonal())
        else:
            # BUG FIX: `adj - np.diag(adj)` subtracted the 1-D diagonal
            # *vector* broadcast across every row, corrupting all
            # entries. Build a diagonal matrix so only diagonal entries
            # are zeroed.
            adj = adj - np.diag(np.diag(adj))

        # here a new copy of adj is created
        if w:
            return adj + w * sp.eye(adj.shape[0], dtype=adj.dtype)
        else:
            return adj

    if gg.is_listlike(fill_weight):
        return tuple(_add_selfloops(adj_matrix, w) for w in fill_weight)
    else:
        return _add_selfloops(adj_matrix, fill_weight)
def astensor(x, *, dtype=None, device=None, escape=None) -> torch.Tensor:
    """Convert `x` to a PyTorch tensor (or backend graph object).

    Parameters
    ----------
    x: torch Tensor, Scipy sparse matrix, Numpy array/matrix, list-like,
        scalar, dict (converted value-wise), or None.
    dtype: torch.dtype, np.dtype or str, optional.
        target dtype; inferred via `gf.infer_type` when None.
    device: optional torch device spec; defaults to CPU.
    escape: a type or tuple of types, optional.
        instances of these types are returned unchanged.

    Returns
    -------
    A tensor on `device`; for Scipy sparse input the result depends on
    the active backend: a DGL graph ("dgl"), an
    `(edge_index, edge_weight)` tensor pair ("pyg"), or a sparse tensor.
    For a dict input, the same dict with converted values.

    Raises
    ------
    TypeError
        if `escape` is not a type/tuple of types, `dtype` is of an
        unsupported kind, or `x` cannot be converted.
    """
    try:
        # `escape` lets callers opt specific types out of conversion;
        # isinstance raises TypeError when `escape` is not a valid type.
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        raise TypeError(f"argument 'escape' must be a type or tuple of types.")
    device = torch.device(device) if device is not None else torch.device(
        "cpu")

    # update: accept `dict` instance
    if isinstance(x, dict):
        # convert values in place, best-effort: values that cannot be
        # converted (TypeError) are deliberately left untouched
        for k, v in x.items():
            try:
                x[k] = astensor(v, dtype=dtype, device=device, escape=escape)
            except TypeError:
                pass
        return x

    if dtype is None:
        dtype = gf.infer_type(x)

    if isinstance(dtype, (np.dtype, str)):
        # presumably maps dtype names (e.g. 'float32') to torch dtypes;
        # unknown names fall through unchanged — TODO confirm
        dtype = data_type_dict().get(str(dtype), dtype)
    elif not isinstance(dtype, torch.dtype):
        raise TypeError(
            f"argument 'dtype' must be torch.dtype, np.dtype or str, but got {type(dtype)}."
        )

    if is_tensor(x):
        tensor = x.to(dtype)
    elif sp.isspmatrix(x):
        if gg.backend() == "dgl":
            import dgl
            # x.sum() == x.nnz only when all stored entries are 1, i.e.
            # the adjacency is unweighted
            if x.sum() != x.nnz:
                warnings.warn(
                    "Got a weighted sparse matrix with elements not equal to 1. "
                    "The element weights can be accessed by `g.edata['_edge_weight'].`"
                )
                tensor = dgl.from_scipy(x,
                                        idtype=torch.int64,
                                        eweight_name="_edge_weight")
            else:
                tensor = dgl.from_scipy(x, idtype=torch.int64)
        elif gg.backend() == "pyg":
            # PyG represents a sparse adjacency as an edge-index /
            # edge-weight pair rather than one tensor
            edge_index, edge_weight = gf.sparse_adj_to_edge(x)
            return (astensor(edge_index,
                             dtype=torch.int64,
                             device=device,
                             escape=escape),
                    astensor(edge_weight,
                             dtype=torch.float32,
                             device=device,
                             escape=escape))
        else:
            tensor = sparse_adj_to_sparse_tensor(x, dtype=dtype)
    elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
              gg.is_scalar(x))):
        tensor = torch.tensor(x, dtype=dtype, device=device)
    else:
        raise TypeError(
            f"Invalid type of inputs. Allowed data type (Tensor, SparseTensor, Numpy array, Scipy sparse tensor, None), but got {type(x)}."
        )
    return tensor.to(device)