Example #1
def Tensor(*, data: Union[np.ndarray, Any], trainable: bool) -> ITensor:
    """
    Create a babilim tensor from a native tensor or numpy array.

    :param data: The data that should be put in a babilim tensor. This can be either a numpy array or a pytorch/tensorflow tensor.
    :param trainable: Whether the created tensor should be trainable. This only works for numpy arrays; native tensors override this field!
    :return: An object of type babilim.core.ITensor.
    """
    if babilim.get_backend() == PYTORCH_BACKEND:
        from babilim.core.tensor_pt import Tensor as _Tensor
        from torch import Tensor as _PtTensor
        native = None
        if isinstance(data, _PtTensor):
            native = data
            data = None
        return _Tensor(data, trainable, native)
    elif babilim.get_backend() == TF_BACKEND:
        from babilim.core.tensor_tf import Tensor as _Tensor
        from tensorflow import Tensor as _TfTensor
        native = None
        if isinstance(data, _TfTensor):
            native = data
            data = None
        return _Tensor(data, trainable, native)
    else:
        raise RuntimeError(
            "No variable implementation for this backend was found. (backend={})"
            .format(babilim.get_backend()))
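
A minimal usage sketch for the factory above, assuming the PyTorch backend is active; the import path `babilim.core` is an assumption, since it is not shown in the snippet.

```
# Hedged usage sketch: assumes the PyTorch backend is selected and that the
# factory is importable from babilim.core.
import numpy as np
import torch
from babilim.core import Tensor

# From a numpy array: `trainable` is honoured.
weight = Tensor(data=np.zeros((3, 3), dtype=np.float32), trainable=True)

# From a native tensor: the tensor is wrapped and, as the docstring notes,
# the native tensor overrides the `trainable` flag.
wrapped = Tensor(data=torch.ones(3, 3), trainable=False)
```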
Example #2
    def call(self, *args, **kwargs) -> Any:
        """
        Makes a module callable and contains the forward pass of your model.
        This should be pure computation and not allocate any weights.
        Allocating weights should be done in the `build` function.

        This function gets called by `__call__` and dispatches the call to `_call_pytorch` or `_call_tf`.
        Furthermore, it takes care of unwrapping the tensors into native tensors before the call and wrapping the results again afterwards.
        This allows the native functions `_call_pytorch` and `_call_tf` to be pure pytorch or tensorflow code.
        All subclasses must implement `_call_pytorch` and `_call_tf`.

        You should call this module in the following style (this ensures the module is built on first run):
        ```
        module = MyModule()
        result = module(*args, **kwargs)
        ```

        Parameters:
        :param *args: You can specify any parameters you want.
        :param **kwargs: You can specify any named parameters you want.
        """
        args = self._wrapper.unwrap(args)
        kwargs = self._wrapper.unwrap(kwargs)
        if is_backend(PYTORCH_BACKEND):
            results = self._call_pytorch(*args, **kwargs)
        elif is_backend(TF_BACKEND):
            results = self._call_tf(*args, **kwargs)
        else:
            raise RuntimeError("Unknown Backend: {}".format(get_backend()))

        results = self._wrapper.wrap(results)
        return results
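
A hedged sketch of the calling convention the docstring describes: a hypothetical subclass implements only the native hooks, while `__call__`/`call` handle wrapping and building. The base class name `Module` and the import paths are assumptions.

```
import numpy as np
from babilim.module import Module  # assumed import path
from babilim.core import Tensor    # assumed import path

class Scale(Module):  # hypothetical example module
    def __init__(self, factor: float):
        super().__init__()
        self.factor = factor

    def _build_pytorch(self, x):
        pass  # no weights to allocate in this toy example

    def _build_tf(self, x):
        pass

    def _call_pytorch(self, x):
        # x arrives here as a native torch.Tensor (already unwrapped by `call`).
        return x * self.factor

    def _call_tf(self, x):
        # x arrives here as a native tf.Tensor (already unwrapped by `call`).
        return x * self.factor

module = Scale(2.0)
x = Tensor(data=np.ones((2, 2), dtype=np.float32), trainable=False)
result = module(x)  # __call__ builds on the first run, then dispatches to `call`
```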
Example #3
    def build(self, *args, **kwargs) -> None:
        """
        Build the model. This function automatically calls the native build functions with the tensors unwrapped.

        This function gets called by `__call__` and dispatches the call to `_build_pytorch` or `_build_tf`.
        Furthermore, it takes care of unwrapping the tensors into native tensors before calling the native build functions.
        This allows the native functions `_build_pytorch` and `_build_tf` to be pure pytorch or tensorflow code.
        All subclasses must implement `_build_pytorch` and `_build_tf`.

        You should never call the build function directly. Call this module in the following style (this ensures the module is built on first run):
        ```
        module = MyModule()
        result = module(*args, **kwargs)  # <- Build gets called internally here.
        ```

        Parameters:
        :param *args: You must specify the exact same parameters as for your call.
        :param **kwargs: You must specify the exact same parameters as for your call.
        """
        args = self._wrapper.unwrap(args)
        kwargs = self._wrapper.unwrap(kwargs)
        if is_backend(PYTORCH_BACKEND):
            self._build_pytorch(*args, **kwargs)
        elif is_backend(TF_BACKEND):
            self._build_tf(*args, **kwargs)
        else:
            raise RuntimeError("Unknown Backend: {}".format(get_backend()))
Example #4
def GradientTape(variables: List) -> object:
    """
    Collect the gradients for the code executed within a `with` statement block.

    :param variables: The variables for which the gradients should be tracked.
    """
    if babilim.get_backend() == PYTORCH_BACKEND:
        from babilim.core.gradient_tape_pt import GradientTapePT
        return GradientTapePT(variables)
    elif babilim.get_backend() == TF_BACKEND:
        from babilim.core.gradient_tape_tf import GradientTapeTF
        return GradientTapeTF(variables)
    else:
        raise RuntimeError(
            "No variable implementation for this backend was found. (backend={})"
            .format(babilim.get_backend()))
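
A hedged usage sketch for the tape factory; the `gradient` method mirrors `tf.GradientTape` and is an assumption about the backend implementations, which are not shown here.

```
variables = [weight]                 # trainable babilim tensors created elsewhere
with GradientTape(variables) as tape:
    loss = compute_loss(weight)      # `compute_loss` is a hypothetical placeholder
grads = tape.gradient(loss)          # assumed API, analogous to tf.GradientTape
```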
Example #5
def TensorWrapper() -> ITensorWrapper:
    """
    Create a tensor wrapper object.
    
    Sometimes it is necessary to implement code in native pytorch or native tensorflow. In that case, the tensor wrapper can help.

    **WARNING: Instead of using the TensorWrapper directly, you should prefer babilim.module.Lambda!**
    """
    if babilim.get_backend() == PYTORCH_BACKEND:
        from babilim.core.tensor_pt import TensorWrapper as _TensorWrapper
        return _TensorWrapper()
    elif babilim.get_backend() == TF_BACKEND:
        from babilim.core.tensor_tf import TensorWrapper as _TensorWrapper
        return _TensorWrapper()
    else:
        raise RuntimeError(
            "No variable implementation for this backend was found. (backend={})"
            .format(babilim.get_backend()))
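
The `unwrap`/`wrap` calls in Examples #2 and #3 suggest the following pattern; treat it as a sketch, since the exact signatures are not shown here and `my_tensor` stands for a babilim tensor created elsewhere.

```
wrapper = TensorWrapper()

def native_double(x):
    # Pure pytorch/tensorflow code that only sees native tensors.
    return x * 2

native_args = wrapper.unwrap((my_tensor,))  # babilim tensors -> native tensors
native_result = native_double(*native_args)
result = wrapper.wrap(native_result)        # native tensors -> babilim tensors
```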
Example #6
    def __exit__(self, type, value, traceback):
        _device_stack.pop()
        if babilim.is_backend(TF_BACKEND):
            # Forward the exception info so the native tf.device scope closes correctly.
            self.native_device.__exit__(type, value, traceback)
            self.native_device = None
        elif babilim.is_backend(PYTORCH_BACKEND):
            pass
        else:
            raise RuntimeError(
                "No implementation for this backend was found. (backend={})".
                format(babilim.get_backend()))
Example #7
    def __enter__(self):
        _device_stack.append(self.name)
        if babilim.is_backend(TF_BACKEND):
            import tensorflow as tf
            self.native_device = tf.device(get_current_device_native_format())
            self.native_device.__enter__()
        elif babilim.is_backend(PYTORCH_BACKEND):
            pass
        else:
            raise RuntimeError(
                "No implementation for this backend was found. (backend={})".
                format(babilim.get_backend()))
        return self
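
The two methods above belong to a device context-manager class whose name is not shown in these snippets; the sketch below assumes it is exposed as `Device` in `babilim.core` and takes the device name as its constructor argument.

```
import numpy as np
from babilim.core import Device, Tensor  # assumed names and import path

with Device("gpu:0"):  # pushes "gpu:0" onto _device_stack (and a tf.device scope on TF)
    x = Tensor(data=np.zeros(3, dtype=np.float32), trainable=False)
# Leaving the block pops the device again via __exit__.
```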
Example #8
def get_current_device_native_format() -> str:
    """
    Get a string specifying the currently selected default device in the backend-specific native format.
    
    When you manually assign a device, always use the device returned by this function.
    """
    name = _device_stack[-1]
    if babilim.is_backend(TF_BACKEND):
        return "/" + name
    elif babilim.is_backend(PYTORCH_BACKEND):
        import torch
        if torch.cuda.is_available():
            return name.replace("gpu", "cuda")
        else:
            return "cpu"
    else:
        raise RuntimeError(
            "No implementation for this backend was found. (backend={})".
            format(babilim.get_backend()))
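
As the docstring recommends, manual device placement should go through this helper; a short PyTorch-flavoured sketch (the surrounding device stack setup is assumed to be in place):

```
import torch

device = get_current_device_native_format()  # e.g. "cuda:0" if a GPU is available, else "cpu"
buffer = torch.zeros(16, device=device)      # allocate directly on the currently selected device
```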