Example #1
def as_tensor(self, x, convert_external=True):
    # Pass native tensors through unchanged.
    if self.is_tensor(x, only_native=convert_external):
        tensor = x
    # Route NumPy arrays through the SciPy backend first, then wrap them.
    elif isinstance(x, np.ndarray):
        tensor = tf.convert_to_tensor(
            SciPyBackend(precision=self.precision).as_tensor(x))
    else:
        tensor = tf.convert_to_tensor(x)
    # --- Enforce Precision ---
    if not isinstance(tensor, numbers.Number):
        if isinstance(tensor, np.ndarray):
            tensor = SciPyBackend(
                precision=self.precision).as_tensor(tensor)
        elif tensor.dtype.is_floating and self.has_fixed_precision:
            tensor = self.to_float(tensor)
    return tensor
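
The convert-then-enforce pattern above can be sketched as a standalone function. The snippet below is illustrative only and not part of the library: `as_tf_tensor` and `TARGET_FLOAT` are made-up names standing in for the backend method and its configured precision.

import numpy as np
import tensorflow as tf

TARGET_FLOAT = tf.float32  # stand-in for the backend's fixed precision

def as_tf_tensor(x):
    # Pass native tensors through; convert everything else (NumPy arrays, lists, scalars).
    tensor = x if tf.is_tensor(x) else tf.convert_to_tensor(x)
    # Enforce the configured floating-point precision on float tensors only.
    if tensor.dtype.is_floating:
        tensor = tf.cast(tensor, TARGET_FLOAT)
    return tensor

print(as_tf_tensor(np.ones(3, dtype=np.float64)).dtype)  # <dtype: 'float32'>
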
Example #2
def as_tensor(self, x, convert_external=True):
    # Pass native tensors through unchanged.
    if self.is_tensor(x, only_native=convert_external):
        tensor = x
    # Route NumPy arrays through the SciPy backend, then wrap them without copying.
    elif isinstance(x, np.ndarray):
        tensor = torch.from_numpy(
            SciPyBackend(precision=self.precision).as_tensor(x))
    elif isinstance(x, (tuple, list)):
        try:
            tensor = torch.tensor(x)
        except ValueError:  # there may be Tensors inside the list
            components = [self.as_tensor(c) for c in x]
            tensor = torch.stack(components, dim=0)
    else:
        tensor = torch.tensor(x)
    # --- Enforce Precision ---
    if self.is_tensor(tensor, only_native=True):
        if tensor.dtype.is_floating_point and self.has_fixed_precision:
            tensor = self.to_float(tensor)
    return tensor
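
The list/tuple fallback above can be exercised in isolation. The sketch below is not the library's code; `stack_nested` is a hypothetical helper that mirrors the try/stack logic for lists that already contain torch Tensors.

import torch

def stack_nested(x):
    # Lists and tuples are converted directly when possible; if they contain
    # torch Tensors, torch.tensor() fails and each element is converted
    # individually, then stacked along a new leading dimension.
    if isinstance(x, (tuple, list)):
        try:
            return torch.tensor(x)
        except (ValueError, TypeError, RuntimeError):
            return torch.stack([stack_nested(c) for c in x], dim=0)
    return x if torch.is_tensor(x) else torch.tensor(x)

mixed = [torch.ones(2), [0.0, 1.0]]
print(stack_nested(mixed).shape)  # torch.Size([2, 2])
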
Example #3
    axis_gradient,
    laplace,
    fourier_laplace,
    fourier_poisson,
    fftfreq,
    abs_square,
    downsample2x,
    upsample2x,
    interpolate_linear,
    spatial_sum,
)
from .batched import BATCHED, ShapeMismatch
from . import optim

# Setup Backend
DYNAMIC_BACKEND.add_backend(SciPyBackend())
DYNAMIC_BACKEND.add_backend(StructBroadcastBackend(DYNAMIC_BACKEND))


def set_precision(floating_point_bits):
    """
    Sets the floating point precision of DYNAMIC_BACKEND, which affects all registered backends.

    If `floating_point_bits` is an integer, all floating point tensors created from then on use the corresponding data type: float16, float32 or float64.
    Operations may also convert floating point values to this precision, even if the input had a different precision.

    If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
    The output of math operations has the same precision as its inputs.

    :param floating_point_bits: one of (16, 32, 64, None)
    """
Example #4
def is_tensor(self, x, only_native=False):
    # Unless restricted to native types, anything the SciPy backend accepts counts too.
    if not only_native and SciPyBackend().is_tensor(x, only_native=False):
        return True
    return isinstance(
        x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.Operation))
Example #5
def dtype(self, array):
    if self.is_tensor(array, only_native=True):
        return array.dtype.as_numpy_dtype
    else:
        return SciPyBackend().dtype(array)
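
As a standalone illustration of the dtype dispatch in Example #5 (not the library's API; `numpy_dtype` is a made-up name): native TensorFlow tensors map their dtype to the equivalent NumPy type, while everything else falls back to NumPy.

import numpy as np
import tensorflow as tf

def numpy_dtype(array):
    # Native TF tensors expose a tf.DType; as_numpy_dtype maps it to the NumPy type.
    if tf.is_tensor(array):
        return array.dtype.as_numpy_dtype
    # NumPy arrays, lists and scalars are answered by NumPy itself.
    return np.asarray(array).dtype

print(numpy_dtype(tf.zeros(2)))              # <class 'numpy.float32'>
print(numpy_dtype(np.zeros(2, np.float64)))  # float64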