def astensor(x, *, dtype=None, device=None, escape=None):
    """Convert `x` to a PyTorch tensor (or backend-specific graph/sparse form).

    Parameters
    ----------
    x: Tensor, scipy sparse matrix, numpy array/matrix, list-like, scalar,
        or ``None``.
    dtype: torch.dtype, np.dtype or str, optional.
        Target data type; inferred via ``gf.infer_type`` when not given.
    device: the desired device of the returned tensor.
    escape: a class or tuple of classes; `x` is returned untouched when
        ``isinstance(x, escape)``.

    Returns
    -------
    A tensor moved to `device`; for the "pyg" backend a sparse matrix
    becomes an ``(edge_index, edge_weight)`` pair; `x` itself when it is
    ``None`` or an `escape` instance.

    Raises
    ------
    TypeError: for an invalid `escape`, `dtype` or `x`.
    """
    try:
        # `isinstance` raises TypeError when `escape` is not a type/tuple.
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        # FIX: dropped the needless f-string prefix (message has no placeholders).
        raise TypeError("argument 'escape' must be a type or tuple of types.")

    if dtype is None:
        dtype = gf.infer_type(x)

    if isinstance(dtype, (np.dtype, str)):
        # Map e.g. "float32" -> torch.float32; unknown names fall through as-is.
        dtype = data_type_dict().get(str(dtype), dtype)
    elif not isinstance(dtype, torch.dtype):
        raise TypeError(
            f"argument 'dtype' must be torch.dtype, np.dtype or str, but got {type(dtype)}."
        )

    if is_tensor(x):
        tensor = x.to(dtype)
    elif gf.is_tensor(x, backend='tensorflow'):
        # Cross-backend input: convert to numpy first, then recurse.
        return astensor(gf.tensoras(x), dtype=dtype, device=device, escape=escape)
    elif sp.isspmatrix(x):
        if gg.backend() == "dgl_torch":
            import dgl
            tensor = dgl.from_scipy(x, idtype=getattr(torch, gg.intx()))
        elif gg.backend() == "pyg":
            edge_index, edge_weight = gf.sparse_adj_to_edge(x)
            return (astensor(edge_index, dtype=gg.intx(), device=device, escape=escape),
                    astensor(edge_weight, dtype=gg.floatx(), device=device, escape=escape))
        else:
            tensor = sparse_adj_to_sparse_tensor(x, dtype=dtype)
    elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
              gg.is_scalar(x))):
        tensor = torch.tensor(x, dtype=dtype, device=device)
    else:
        raise TypeError(
            f"Invalid type of inputs. Allowed data type (Tensor, SparseTensor, Numpy array, Scipy sparse tensor, None), but got {type(x)}."
        )
    return tensor.to(device)
def astensor(x, dtype=None, device=None, kind=None):
    """Convert an input matrix to a Tensor or SparseTensor.

    Parameters:
    ----------
    x: tf.Tensor, tf.Variable, Scipy sparse matrix, Numpy array-like, etc.
    dtype: the type of Tensor `x`; when not specified, an appropriate
        data type is chosen automatically (see `graphgallery.infer_type`).
    device (:class:`torch.device` or `tf.device`, optional): the desired
        device of the returned tensor. Default: if ``None``, uses the
        current device for the default tensor type.
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch; defaults to
        the current backend's kind.

    Returns:
    ----------
    Tensor or SparseTensor with dtype:
        1. `graphgallery.floatx()` if `x` is floating,
        2. `graphgallery.intx()` if `x` is integer,
        3. `Bool` if `x` is bool.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    device = parse_device(device, kind)
    # Dispatch to the backend-specific implementation.
    convert = tf_tensor.astensor if kind == "T" else th_tensor.astensor
    return convert(x, dtype=dtype, device=device)
def astensors(*xs, device=None, kind=None):
    """Convert input matrices to Tensor(s) or SparseTensor(s).

    Parameters:
    ----------
    xs: tf.Tensor, tf.Variable, Scipy sparse matrix, Numpy array-like,
        or a list of them, etc.
    device (:class:`torch.device`, optional): the desired device of the
        returned tensors. Default: if ``None``, uses the current device for
        the default tensor type (see :func:`torch.set_default_tensor_type`).
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch; defaults to
        the current backend's kind.

    NOTE:
    ----------
    The argument `device` only works for the `PyTorch backend`.

    Returns:
    ----------
    Tensor(s) or SparseTensor(s) with dtype:
        1. `graphgallery.floatx()` if `x` in `xs` is floating,
        2. `graphgallery.intx()` if `x` in `xs` is integer,
        3. `Bool` if `x` in `xs` is bool.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    # Pick the backend module and forward everything to it.
    convert = tf_tensor.astensors if kind == "T" else th_tensor.astensors
    return convert(*xs, device=device)
def is_sparse_tensor(x):
    """Check whether `x` is a sparse Tensor."""
    # "T" marks the TensorFlow backend; anything else is PyTorch.
    checker = is_tf_sparse_tensor if backend().kind == "T" else is_th_sparse_tensor
    return checker(x)
def astensors(*xs, dtype=None, device=None, backend=None, escape=None):
    """Convert input matrices to Tensor(s) or SparseTensor(s).

    Parameters:
    ----------
    xs: one or a list of python object(s).
    dtype: the type of Tensor `x`; when not specified, an appropriate data
        type is chosen automatically. See 'graphgallery.infer_type'.
    device: tf.device, optional. The desired device of the returned
        tensors. Default: if 'None', uses the CPU device for the default
        tensor type.
    backend: String or BackendModule, optional. 'tensorflow', 'torch',
        TensorFlowBackend, PyTorchBackend, etc. If not specified, the
        current default backend module is used.
    escape: a Class or a tuple of Classes; conversion is disabled for any
        `x` with `isinstance(x, escape)`.

    Returns:
    -------
    Tensor(s) or SparseTensor(s) with dtype. If dtype is 'None', dtype
    will be one of the following:
        1. 'graphgallery.floatx()' if 'x' is floating,
        2. 'graphgallery.intx()' if 'x' is integer,
        3. 'graphgallery.boolx()' if 'x' is boolean.
    """
    # Resolve the backend first; the device depends on it.
    resolved_backend = gg.backend(backend)
    resolved_device = gf.device(device, resolved_backend)
    return _astensors_fn(*xs, dtype=dtype, device=resolved_device,
                         backend=resolved_backend, escape=escape)
def is_tensor(x, kind=None):
    """Check whether `x` is a (sparse) tensor of either framework:
    tf.Tensor, tf.Variable, tf.RaggedTensor, tf.sparse.SparseTensor,
    torch.Tensor or torch.sparse.Tensor.

    Parameters:
    ----------
    x: A python object to check.
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.

    Returns:
    ----------
    `True` iff `x` is a (tf or torch) (sparse-)tensor.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert_kind(kind)
    return is_tf_tensor(x) if kind == "T" else is_th_tensor(x)
def is_strided_tensor(x, kind=None):
    """Check whether `x` is a strided (dense) Tensor.

    Parameters:
    ----------
    x: A python object to check.
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.

    Returns:
    ----------
    `True` iff `x` is a (tf or torch) strided (dense) Tensor.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert_kind(kind)
    return is_tf_strided_tensor(x) if kind == "T" else is_th_strided_tensor(x)
def load_models(backend_name=None):
    """(Re)populate this module and the `Gallery` registry with the model
    classes available for the given backend.

    Parameters
    ----------
    backend_name: optional backend identifier forwarded to `backend()`.

    Side effects
    ------------
    Rebinds the module-global `Gallery` registry and sets one attribute on
    this module per known model name; missing models get a stub from
    `_gen_missing_model` that reports the unsupported backend.
    """
    _backend = backend(backend_name)
    thismod = sys.modules[__name__]

    global Gallery
    Gallery = gf.Registry("GraphGalleryModels")

    def _register_all(mod, model_names):
        # Shared registration loop (previously duplicated for gallery and
        # sklearn models): register classes that exist in `mod`, and stub
        # out the rest so attribute access still works.
        for model in model_names:
            _model_class = mod.__dict__.get(model, None)
            if _model_class is not None:
                Gallery.register(_model_class)
                setattr(thismod, model, _model_class)
            else:
                setattr(thismod, model, _gen_missing_model(model, _backend))

    _register_all(
        importlib.import_module(f".gallery_model.{_backend.abbr}", __name__),
        _GALLERY_MODELS)
    _register_all(
        importlib.import_module(f".sklearn_model", __name__),
        _SKLEARN_MODELS)
def get_module(backend: Optional[Backend] = None):
    """Return the tensor module for `backend`: either
    'graphgallery.functional.tensor.tensorflow' or
    'graphgallery.functional.tensor.pytorch'.

    Parameters
    ----------
    backend: String or BackendModule, optional.
        'tensorflow', 'torch', TensorFlowBackend, PyTorchBackend, etc.
        If not specified, the current backend module is used.

    Returns
    -------
    module:
    - 'graphgallery.functional.tensor.tensorflow' for the tensorflow backend,
    - 'graphgallery.functional.tensor.pytorch' for the pytorch backend.
    """
    backend = gg.backend(backend)
    return tensorflow if backend == "tensorflow" else pytorch
def asintarr(x, dtype: str = None):
    """Convert `x` to an integer Numpy array (or cast an existing tensor).

    Parameters:
    ----------
    x: Tensor, Scipy sparse matrix, Numpy array-like, etc.
    dtype: str, optional. Target integer dtype name; defaults to
        `graphgallery.intx()`.

    Returns:
    ----------
    Integer Numpy array with `dtype`, or the input tensor cast in-framework.
    """
    if dtype is None:
        dtype = intx()

    if is_tensor(x):
        # Cast within the tensor framework rather than via numpy.
        if x.dtype != dtype:
            if backend().kind == "T":
                x = tf.cast(x, dtype=dtype)
            else:
                x = x.to(getattr(torch, dtype))
        return x

    if is_interger_scalar(x):
        return np.asarray([x], dtype=dtype)
    if is_list_like(x) or isinstance(x, (np.ndarray, np.matrix)):
        return np.asarray(x, dtype=dtype)
    raise ValueError(
        f"Invalid input which should be either array-like or integer scalar, but got {type(x)}."
    )
def __init__(self, *, device="cpu", seed=None, name=None, **cfg):
    """
    Parameters:
    ----------
    device: string. optional
        The device where the model runs on.
    seed: integer scalar. optional
        Used to create a reproducible sequence of tensors
        across multiple calls.
    name: string. optional
        Specified name for the model. (default: :str: `class name`)
    cfg: other custom keyword arguments.
    """
    # Seed all RNGs before anything else touches them.
    gg.set_seed(seed)
    self.seed = seed
    self.name = name or self.__class__.__name__
    self.backend = gg.backend()

    self.cfg = gf.BunchDict(cfg)
    if self.cfg:
        print(f"Receiving configs:\n{self.cfg}")

    # Model and data share a device by default.
    self.device = torch.device(device)
    self.data_device = self.device

    # Lazily-populated state.
    self._model = None
    self._graph = None
    self._cache = gf.BunchDict()
    self.transform = gf.BunchDict()
def sparse_adj_to_sparse_tensor(x, kind=None):
    """Converts a Scipy sparse matrix to a TensorFlow/PyTorch SparseTensor.

    Parameters
    ----------
    x: Scipy sparse matrix
        Matrix in Scipy sparse format.
    kind: str, optional.
        "T" for TensorFlow, "P" for PyTorch; defaults to `backend().kind`.

    Returns
    -------
    S: SparseTensor
        Matrix as a sparse tensor.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert_kind(kind)
    # Choose the backend implementation, then convert.
    module = T.tf_tensor if kind == "T" else T.th_tensor
    return module.sparse_adj_to_sparse_tensor(x)
def normalize_edge_tensor(edge_index,
                          edge_weight=None,
                          n_nodes=None,
                          fill_weight=1.0,
                          rate=-0.5,
                          kind=None):
    """Normalize an adjacency given in edge (COO) representation.

    Parameters
    ----------
    edge_index: edge indices of the graph.
    edge_weight: optional edge weights.
    n_nodes: optional number of nodes.
    fill_weight: weight used for added self-loops (default 1.0).
    rate: normalization exponent (default -0.5, i.e. symmetric).
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    # BUG FIX: this previously dispatched to `normalize_adj_tensor`, whose
    # signature (adj, rate=..., fill_weight=...) does not accept the
    # `edge_weight`/`n_nodes` keywords passed here; dispatch to the edge
    # variant instead.
    if kind == "T":
        return tf_tensor.normalize_edge_tensor(edge_index,
                                               edge_weight=edge_weight,
                                               n_nodes=n_nodes,
                                               fill_weight=fill_weight,
                                               rate=rate)
    else:
        return th_tensor.normalize_edge_tensor(edge_index,
                                               edge_weight=edge_weight,
                                               n_nodes=n_nodes,
                                               fill_weight=fill_weight,
                                               rate=rate)
def random_seed(seed: Optional[int] = None, backend: Optional[Backend] = None):
    """Seed the RNGs of numpy, `random`, and the given backend framework.

    Parameters
    ----------
    seed: int, optional.
        Seed value. ``None`` re-randomizes numpy/`random` and leaves the
        torch RNGs untouched.
    backend: String or BackendModule, optional.
        Backend whose framework RNG should be seeded; defaults to the
        current backend.
    """
    backend = gg.backend(backend)
    np.random.seed(seed)
    random.seed(seed)
    if backend == "tensorflow":
        tf.random.set_seed(seed)
    elif seed is not None:
        # BUG FIX: `torch.manual_seed(None)` raises TypeError, so only seed
        # torch when an explicit seed is given (consistent with `set_seed`).
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
def add_selfloops_edge(edge_index, edge_weight, n_nodes=None, fill_weight=1.0,
                       kind=None):
    """Add self-loop edges to an edge-represented (COO) adjacency.

    Parameters
    ----------
    edge_index: edge indices of the graph.
    edge_weight: edge weights of the graph.
    n_nodes: optional number of nodes.
    fill_weight: weight assigned to the added self-loops (default 1.0).
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    # BUG FIX: both branches previously called `normalize_adj_tensor`
    # (copy-paste error); dispatch to the backend `add_selfloops_edge`.
    if kind == "T":
        return tf_tensor.add_selfloops_edge(edge_index,
                                            edge_weight,
                                            n_nodes=n_nodes,
                                            fill_weight=fill_weight)
    else:
        return th_tensor.add_selfloops_edge(edge_index,
                                            edge_weight,
                                            n_nodes=n_nodes,
                                            fill_weight=fill_weight)
def normalize_adj_tensor(adj, rate=-0.5, fill_weight=1.0, kind=None):
    """Normalize a dense/sparse adjacency tensor via the backend module.

    Parameters
    ----------
    adj: the adjacency tensor.
    rate: normalization exponent (default -0.5, i.e. symmetric).
    fill_weight: weight used for added self-loops (default 1.0).
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    module = tf_tensor if kind == "T" else th_tensor
    return module.normalize_adj_tensor(adj, rate=rate, fill_weight=fill_weight)
def set_seed(seed: Optional[int] = None):
    """Seed numpy, `random`, torch (and DGL, when active) for reproducibility.

    Parameters
    ----------
    seed: int, optional.
        ``None`` re-randomizes numpy/`random` and leaves torch/DGL untouched.

    Raises
    ------
    AssertionError: if `seed` is neither ``None`` nor a number.
    """
    assert seed is None or isinstance(seed, Number), seed
    np.random.seed(seed)
    random.seed(seed)
    if seed is not None:
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        if backend() == 'dgl':
            import dgl
            # FIX(robustness): dgl.random.seed requires an integer, so DGL
            # is only seeded when an explicit seed was provided.
            dgl.random.seed(seed)
def sparse_edges_to_sparse_tensor(edge_index: np.ndarray,
                                  edge_weight: np.ndarray = None,
                                  shape: tuple = None,
                                  kind=None):
    """Build a backend SparseTensor from COO edges.

    Parameters
    ----------
    edge_index: np.ndarray of edge indices.
    edge_weight: np.ndarray, optional edge weights.
    shape: tuple, optional dense shape of the result.
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    module = tf_tensor if kind == "T" else th_tensor
    return module.sparse_edges_to_sparse_tensor(edge_index, edge_weight, shape)
def sparse_tensor_to_sparse_adj(x, kind=None):
    """Converts a SparseTensor to a Scipy sparse matrix (CSR matrix)."""
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    # Select the backend converter and apply it.
    module = tf_tensor if kind == "T" else th_tensor
    return module.sparse_tensor_to_sparse_adj(x)
def get_model(model: str, backend_name=None):
    """Look up a model class by name in the backend-specific module.

    Parameters
    ----------
    model: str, name of the model class to fetch.
    backend_name: optional backend identifier forwarded to `gg.backend`.

    Raises
    ------
    ImportError: when the model is not available for the chosen backend.
    """
    backend = gg.backend(backend_name)
    mod = importlib.import_module(f".{backend.abbr}", __name__)
    _model_class = mod.__dict__.get(model, None)
    # Guard clause: fail loudly when the backend lacks this model.
    if _model_class is None:
        raise ImportError(f"model {model} is not supported by '{backend}'."
                          " You can switch to other backends by setting"
                          " the 'graphgallery.backend' environment.")
    return _model_class
def normalize_adj_tensor(adj, rate=-0.5, fill_weight=1.0, kind=None):
    """Normalize an adjacency tensor through the `T` backend modules.

    Parameters
    ----------
    adj: the adjacency tensor.
    rate: normalization exponent (default -0.5).
    fill_weight: weight used for added self-loops (default 1.0).
    kind: str, optional. "T" for TensorFlow, "P" for PyTorch;
        defaults to `backend().kind`.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert_kind(kind)
    if kind == "T":
        return T.tf_tensor.normalize_adj_tensor(adj,
                                                rate=rate,
                                                fill_weight=fill_weight)
    # TODO: torch path kept as in the original; pending review.
    return T.th_tensor.normalize_adj_tensor(adj,
                                            rate=rate,
                                            fill_weight=fill_weight)
def is_tensor_or_variable(x):
    """Check whether `x` is tf.Tensor or tf.Variable or tf.RaggedTensor.

    Parameters:
        x: A python object to check.

    Returns:
        `True` iff `x` is a `tf.Tensor` or `tf.Variable` or `tf.RaggedTensor`.
    """
    if backend().kind != "T":
        # TODO: is it really work for all torch tensors??
        return torch.is_tensor(x)
    return (tf.is_tensor(x)
            or isinstance(x, (tf.Variable, tf.RaggedTensor))
            or is_tf_sparse_tensor(x))
def __init__(self, *, device="cpu", seed=None, name=None, **kwargs): """ Parameters: ---------- device: string. optional The device where the model running on. seed: interger scalar. optional Used to create a reproducible sequence of tensors across multiple calls. name: string. optional Specified name for the model. (default: :str: `class name`) kwargs: other custom keyword arguments. """ # if graph is not None and not isinstance(graph, gg.data.BaseGraph): # raise ValueError(f"Unrecognized graph: {graph}.") kwargs.pop("self", None) kwargs.pop("__class__", None) cfg = gg.CfgNode() cfg.merge_from_dict(kwargs) cfg.intx = self.intx = gg.intx() cfg.floatx = self.floatx = gg.floatx() cfg.boolx = self.boolx = gg.boolx() cfg.seed = self.seed = seed cfg.name = self.name = name or self.__class__.__name__ cfg.device = device _backend = gg.backend() cfg.backend = getattr(_backend, "name", None) if seed: gf.random_seed(seed, _backend) self.device = gf.device(device, _backend) self.data_device = self.device self.backend = _backend # data types, default: `float32`,`int32` and `bool` self._cache = gf.BunchDict() self.transform = gf.BunchDict() self._model = None self._graph = None self.cfg = cfg self.setup_cfg() self.custom_setup()
def __init__(self, *graph, device="cpu:0", seed=None, name=None, **kwargs):
    """Initialize the model base: parse the graph input(s), seed the
    backend-appropriate RNGs, and resolve the device.

    Parameters:
    ----------
    graph: Graph or MultiGraph.
    device: string. optional
        The device where the model running on.
    seed: interger scalar. optional
        Used in combination with `tf.random.set_seed` & `np.random.seed`
        & `random.seed` to create a reproducible sequence of tensors
        across multiple calls.
    name: string. optional
        Specified name for the model. (default: :str: `class.__name__`)
    kwargs: other custom keyword parameters.
    """
    graph = parse_graph_inputs(*graph)
    _backend = backend()
    self.backend = _backend
    self.kind = _backend.kind

    # This base class accepts no extra keyword arguments.
    raise_if_kwargs(kwargs)

    if seed is not None:
        np.random.seed(seed)
        random.seed(seed)
        # Seed the framework matching the active backend.
        if self.kind == "P":
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            # torch.cuda.manual_seed_all(seed)
        else:
            tf.random.set_seed(seed)

    if name is None:
        name = self.__class__.__name__

    self.seed = seed
    self.name = name
    # Copy so later mutations do not alter the caller's graph.
    self.graph = graph.copy()
    self.device = parse_device(device, self.kind)

    # data types, default: `float32` and `int32`
    self.floatx = floatx()
    self.intx = intx()
def is_tensor(x):
    """Check whether `x` is tf.Tensor, tf.Variable, tf.RaggedTensor,
    tf.sparse.SparseTensor, torch.Tensor, torch.sparse.Tensor.

    Parameters:
        x: A python object to check.

    Returns:
        `True` iff `x` is a (tf or torch) (sparse-)tensor.
    """
    checker = is_tf_tensor if backend().kind == "T" else is_th_tensor
    return checker(x)
def astensor(x, *, dtype=None, device=None, escape=None):
    """Convert `x` to a TensorFlow tensor (or SparseTensor / DGL graph).

    Parameters
    ----------
    x: a Tensor, scipy sparse matrix, numpy array/matrix, list-like, scalar,
        or ``None``.
    dtype: tf.dtypes.DType, np.dtype or str, optional.
        Target dtype; inferred with ``gf.infer_type`` when omitted.
    device: the desired device of the returned tensor.
    escape: a class or tuple of classes; matching inputs are returned as-is.

    Returns
    -------
    A tensor created within `tf.device(device)`, or `x` unchanged when it
    is ``None`` or an `escape` instance.

    Raises
    ------
    TypeError: for an invalid `escape`, `dtype` or `x`.
    """
    try:
        # `isinstance` raises TypeError when `escape` is not a type/tuple.
        if x is None or (escape is not None and isinstance(x, escape)):
            return x
    except TypeError:
        # FIX: dropped the needless f-string prefix (message has no placeholders).
        raise TypeError("argument 'escape' must be a type or tuple of types.")

    if dtype is None:
        dtype = gf.infer_type(x)
    elif isinstance(dtype, tf.dtypes.DType):
        # Normalize all dtype spellings to a plain string name.
        dtype = dtype.name
    elif isinstance(dtype, (np.dtype, str)):
        dtype = str(dtype)
    else:
        raise TypeError(
            f"argument 'dtype' must be tf.dtypes.DType, np.dtype or str, but got {type(dtype)}."
        )

    with tf.device(device):
        if is_tensor(x):
            if x.dtype != dtype:
                return tf.cast(x, dtype=dtype)
            # Same dtype: identity keeps the device-scope placement.
            return tf.identity(x)
        elif gf.is_tensor(x, backend='torch'):
            # Cross-backend input: convert to numpy first, then recurse.
            return astensor(gf.tensoras(x), dtype=dtype, device=device, escape=escape)
        elif sp.isspmatrix(x):
            if gg.backend() == "dgl_tf":
                import dgl
                return dgl.from_scipy(x, idtype=getattr(tf, gg.intx())).to(device)
            else:
                return sparse_adj_to_sparse_tensor(x, dtype=dtype)
        elif any((isinstance(x, (np.ndarray, np.matrix)), gg.is_listlike(x),
                  gg.is_scalar(x))):
            return tf.convert_to_tensor(x, dtype=dtype)
        else:
            raise TypeError(
                f"Invalid type of inputs. Allowed data type(Tensor, SparseTensor, Numpy array, Scipy sparse matrix, None), but got {type(x)}."
            )
def sparse_adj_to_sparse_tensor(x, kind=None):
    """Converts a Scipy sparse matrix to a TensorFlow/PyTorch SparseTensor.

    Parameters
    ----------
    x: scipy.sparse.sparse
        Matrix in Scipy sparse format.
    kind: str, optional.
        "T" for TensorFlow, "P" for PyTorch; defaults to `backend().kind`.

    Returns
    -------
    S: SparseTensor
        Matrix as a sparse tensor.
    """
    if kind is None:
        kind = backend().kind
    else:
        assert kind in {"T", "P"}
    module = tf_tensor if kind == "T" else th_tensor
    return module.sparse_adj_to_sparse_tensor(x)
def __init__(self, graph, device="cpu", seed=None, name=None, **kwargs):
    """Initialize the model base: validate the graph, seed RNGs for the
    current backend, and resolve the device.

    Parameters:
    ----------
    graph: Graph or MultiGraph.
    device: string. optional
        The device where the model running on.
    seed: interger scalar. optional
        Used in combination with `tf.random.set_seed` & `np.random.seed`
        & `random.seed` to create a reproducible sequence of tensors
        across multiple calls.
    name: string. optional
        Specified name for the model. (default: :str: `class.__name__`)
    kwargs: other custom keyword arguments.
    """
    if not isinstance(graph, gg.data.BaseGraph):
        raise ValueError(f"Unrecognized graph: {graph}.")

    _backend = gg.backend()

    # It currently takes no keyword arguments
    gg.utils.raise_error.raise_if_kwargs(kwargs)

    # NOTE: truthiness check — seed=0 skips seeding here.
    if seed:
        gf.random_seed(seed, _backend)

    if name is None:
        name = self.__class__.__name__

    self.seed = seed
    self.name = name
    # Copy so later mutations do not alter the caller's graph.
    self.graph = graph.copy()
    self.device = gf.device(device, _backend)
    self.backend = _backend

    # data types, default: `float32`,`int32` and `bool`
    self.floatx = gg.floatx()
    self.intx = gg.intx()
    self.boolx = gg.boolx()

    # Per-instance scratch space for derived quantities.
    self._cache = gf.BunchDict()
def load_models(backend_name=None):
    """(Re)populate this module with the model classes available for the
    given backend, recording found models in `_enabled_models`.

    Parameters
    ----------
    backend_name: optional backend identifier forwarded to `backend()`.

    Side effects
    ------------
    Adds found model names to `_enabled_models` and sets one attribute on
    this module per known model name; missing models get a stub from
    `_gen_missing_model` that reports the unsupported backend.
    """
    _backend = backend(backend_name)
    thismod = sys.modules[__name__]

    def _register_all(mod, model_names):
        # Shared registration loop (previously duplicated for gallery and
        # sklearn models): export classes that exist in `mod`, stub the rest.
        for model in model_names:
            _model_class = mod.__dict__.get(model, None)
            if _model_class is not None:
                _enabled_models.add(model)
                setattr(thismod, model, _model_class)
            else:
                setattr(thismod, model, _gen_missing_model(model, _backend))

    _register_all(
        importlib.import_module(f".gallery_model.{_backend.abbr}", __name__),
        _GALLERY_MODELS)
    _register_all(
        importlib.import_module(f".sklearn_model", __name__),
        _SKLEARN_MODELS)
def __init__(self, dataset, device='cpu', escape=None, **kwargs):
    """Wrap `dataset`, remembering the device/backend and pre-binding the
    tensor-conversion helpers.

    Parameters
    ----------
    dataset: the dataset forwarded to the parent class.
    device: string, optional. Device used for converted tensors.
    escape: class or tuple of classes excluded from conversion.
    kwargs: extra keyword arguments forwarded to the parent class.
    """
    super().__init__(dataset, **kwargs)
    self.device = device
    self.backend = gg.backend()
    # Pre-bind conversion helpers so callers need not repeat device/escape.
    self.astensor = partial(gf.astensor, device=device, escape=escape)
    self.astensors = partial(gf.astensors, device=device, escape=escape)