def __init__(self): Backend.__init__(self, "Jax", default_device=None) try: self.rnd_key = jax.random.PRNGKey(seed=0) except RuntimeError as err: warnings.warn(f"{err}") self.rnd_key = None
def __init__(self): Backend.__init__(self, "Jax", default_device=None) try: self.rnd_key = jax.random.PRNGKey(seed=0) except NameError: # Jax not imported self.rnd_key = None except RuntimeError: self.rnd_key = None
def __init__(self):
    # Derive the default device name (e.g. '/device:CPU:0') from a dummy tensor.
    default_device = '/' + os.path.basename(tf.zeros(()).device)
    default_device = ComputeDevice(self, default_device, default_device.split(":")[-2], -1, -1, "", default_device)
    # Replace the placeholder with the fully described device if it is listed.
    for device in self.list_devices():
        if device.name == default_device.name:
            default_device = device
    Backend.__init__(self, "TensorFlow", default_device)
def __init__(self):
    # Prefer the first GPU if one is available, otherwise fall back to the CPU.
    gpus = self.list_devices('GPU')
    cpus = self.list_devices('CPU')
    Backend.__init__(self, "Jax", default_device=gpus[0] if gpus else cpus[0])
    try:
        self.rnd_key = jax.random.PRNGKey(seed=0)
    except RuntimeError as err:
        warnings.warn(f"{err}", RuntimeWarning)
        self.rnd_key = None
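# Hedged usage sketch (not part of the backends above): JAX random state is an
# explicit key, so each draw should split the stored `rnd_key` rather than
# reuse it. Names below are illustrative.
import jax

key = jax.random.PRNGKey(seed=0)
key, subkey = jax.random.split(key)          # advance the key for the next call
sample = jax.random.uniform(subkey, (3, 3))  # draw without reusing the old key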
def add(self, a, b):
    with self._device_for(a, b):
        if isinstance(a, tf.SparseTensor) or isinstance(b, tf.SparseTensor):
            # tf.sparse.add supports sparse operands; result entries whose
            # magnitude falls below `threshold` are pruned.
            return tf.sparse.add(a, b, threshold=1e-5)
        else:
            return Backend.add(self, a, b)
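# Minimal illustration of the sparse branch above (values are made up):
import tensorflow as tf

a = tf.sparse.SparseTensor(indices=[[0, 0], [1, 1]], values=[1.0, 2.0], dense_shape=[2, 2])
b = tf.sparse.SparseTensor(indices=[[0, 0], [1, 0]], values=[-1.0, 3.0], dense_shape=[2, 2])
c = tf.sparse.add(a, b, threshold=1e-5)  # the (0, 0) entry sums to 0 and is pruned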
def mul(self, a, b):
    # if scipy.sparse.issparse(a):  # TODO sparse?
    #     return a.multiply(b)
    # elif scipy.sparse.issparse(b):
    #     return b.multiply(a)
    # else:
    return Backend.mul(self, a, b)
def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
    # Fall back to the generic implementation for function-valued operators
    # or when a solve trajectory is requested.
    if callable(lin) or trj:
        assert self.is_available(y), "Tracing conjugate_gradient with linear operator is not yet supported."
        return Backend.conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj)
    assert isinstance(lin, torch.Tensor) and lin.is_sparse, "Batched matrices are not yet supported"
    # Normalize inputs to float tensors before calling the TorchScript CG kernel.
    y = self.to_float(y)
    x0 = self.copy(self.to_float(x0))
    rtol = self.as_tensor(rtol)
    atol = self.as_tensor(atol)
    max_iter = self.as_tensor(max_iter)
    x, residual, iterations, function_evaluations, converged, diverged = torch_sparse_cg_adaptive(lin, y, x0, rtol, atol, max_iter)
    return SolveResult(f"Φ-Flow CG ({'PyTorch*' if self.is_available(y) else 'TorchScript'})", x, residual, iterations, function_evaluations, converged, diverged, "")
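# For reference, a minimal dense sketch of the conjugate-gradient recurrence
# that `torch_sparse_cg_adaptive` presumably implements (assumptions: `lin` is
# a symmetric positive-definite dense matrix, `y` and `x0` are 1-D; the real
# helper additionally tracks function evaluations and convergence/divergence flags).
import torch

def cg_sketch(lin, y, x0, rtol, atol, max_iter):
    x = x0.clone()
    residual = y - lin @ x
    direction = residual.clone()
    rs_old = residual.dot(residual)
    # Stop once ||residual|| <= max(atol, rtol * ||y||), compared in squared form.
    tol_sq = max(float(atol), float(rtol) * float(torch.linalg.norm(y))) ** 2
    iterations = 0
    while float(rs_old) > tol_sq and iterations < max_iter:
        lin_dir = lin @ direction
        step = rs_old / direction.dot(lin_dir)
        x = x + step * direction
        residual = residual - step * lin_dir
        rs_new = residual.dot(residual)
        direction = residual + (rs_new / rs_old) * direction
        rs_old = rs_new
        iterations += 1
    return x, residual, iterations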
def __init__(self): self.cpu = ComputeDevice(self, "CPU", 'CPU', -1, -1, "", ref='cpu') Backend.__init__(self, 'PyTorch', default_device=self.cpu)
def auto_cast(self, *tensors) -> list:
    # Keep Python numbers and bools as-is; convert all other arguments to
    # tensors before delegating to the parent's dtype unification.
    tensors = [t if isinstance(t, (numbers.Number, bool)) else self.as_tensor(t, True) for t in tensors]
    return Backend.auto_cast(self, *tensors)
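# Standalone sketch of the casting rule above (the function name is
# illustrative, not the backend's API): scalars stay native Python values,
# everything else becomes a tensor.
import numbers
import torch

def tensorize_except_scalars(*values):
    return [v if isinstance(v, (numbers.Number, bool)) else torch.as_tensor(v) for v in values]

mixed = tensorize_except_scalars([1, 2, 3], 0.5, torch.ones(3))
# -> [tensor([1, 2, 3]), 0.5, tensor([1., 1., 1.])]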
def __init__(self):
    cpu = NUMPY.cpu
    self.cpu = ComputeDevice(self, "CPU", 'CPU', cpu.memory, cpu.processor_count, cpu.description, ref='cpu')
    gpus = self.list_devices('GPU')
    # Default to the first GPU if present; otherwise use this backend's own
    # CPU device (not NumPy's) so allocations are routed correctly.
    Backend.__init__(self, 'PyTorch', default_device=gpus[0] if gpus else self.cpu)
def __init__(self): Backend.__init__(self, "TensorFlow", default_device=None)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
    # When y is not concretely available (e.g. during tracing), route 'auto'
    # to this backend's own conjugate-gradient path.
    if method == 'auto' and not trj and not self.is_available(y):
        return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
    else:
        return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj)
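# Hypothetical call sketch for the dispatch above (the backend instance and
# solve settings are assumed, not taken from the source):
# result = backend.linear_solve('auto', sparse_lin, y, x0, rtol=1e-5, atol=1e-5, max_iter=1000, trj=False)
# While tracing, is_available(y) is presumably False (cf. the 'TorchScript'
# label in conjugate_gradient_adaptive), so 'auto' takes the scriptable
# conjugate-gradient path instead of the generic Backend solver.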