Example 1
def cos(tensor: Tensor) -> Tensor:
    """
    Function implements the cos function in autograd
    :param tensor: (Tensor) Input tensor
    :return: (Tensor) Output tensor
    """
    # Apply cos
    output = np.cos(tensor.data)
    # Check if gradient is needed
    requires_grad = tensor.requires_grad
    # Make backward function if needed
    if requires_grad:
        def grad_cos(grad: np.ndarray) -> np.ndarray:
            """
            Function computes gradient of the cos function
            :param grad: (np.ndarray) Previous gradient
            :return: (np.ndarray) Gradient
            """
            return grad * (- np.sin(tensor.data))

        dependency = [Dependency(activation=tensor, grad_fn=grad_cos)]
    else:
        dependency = None

    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependency)
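All of the examples in this listing rely on a Tensor class and a Dependency record that are not shown here. A minimal sketch of the autograd core they assume might look like the following (a hypothetical reconstruction: only the attribute and constructor names are taken from the calls above, everything else is an assumption):

from typing import Callable, List, NamedTuple, Optional

import numpy as np


class Dependency(NamedTuple):
    # Parent tensor a result was computed from, plus the local backward rule
    activation: "Tensor"
    grad_fn: Callable[[np.ndarray], np.ndarray]


class Tensor:
    def __init__(self, data: np.ndarray, requires_grad: bool = False,
                 dependencies: Optional[List[Dependency]] = None) -> None:
        self.data = np.asarray(data, dtype=np.float64)
        self.requires_grad = requires_grad
        self.dependencies = dependencies if dependencies is not None else []
        self.grad: Optional[np.ndarray] = None

    def backward(self, grad: Optional[np.ndarray] = None) -> None:
        # Seed with ones if no upstream gradient is supplied
        if grad is None:
            grad = np.ones_like(self.data)
        self.grad = grad if self.grad is None else self.grad + grad
        # Propagate to each parent through its stored grad_fn
        for dependency in self.dependencies:
            dependency.activation.backward(dependency.grad_fn(grad))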
Example 2
def log(tensor: Tensor) -> Tensor:
    """
    Function implements the natural logarithm in autograd.
    :param tensor: (Tensor) Input tensor
    :return: (Tensor) Output tensor
    """
    # Apply natural logarithm
    output = np.log(tensor.data)
    # Check if gradient is needed
    requires_grad = tensor.requires_grad
    # Make backward function if needed
    if requires_grad:
        def grad_log(grad: np.ndarray) -> np.ndarray:
            """
            Function computes gradient of the natural logarithm
            :param grad: (np.ndarray) Previous gradient
            :return: (np.ndarray) Gradient
            """
            return grad * (1 / (tensor.data))

        dependency = [Dependency(activation=tensor, grad_fn=grad_log)]
    else:
        dependency = None

    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependency)
Example 3
def exp(tensor: Tensor) -> Tensor:
    """
    Function implements the element-wise exponential function
    :param tensor: (Tensor) Input tensor
    :return: (Tensor) Output tensor
    """
    # Apply exp
    output = np.exp(tensor.data)
    # Check if gradient is needed
    requires_grad = tensor.requires_grad
    # Make backward function if needed
    if requires_grad:
        def grad_exp(grad: np.ndarray) -> np.ndarray:
            """
            Function computes gradient of the exponential function
            :param grad: (np.ndarray) Previous gradient
            :return: (np.ndarray) Gradient
            """
            return grad * np.exp(tensor.data)

        dependency = [Dependency(activation=tensor, grad_fn=grad_exp)]
    else:
        dependency = None

    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependency)
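A short composition check (assuming the hypothetical Tensor sketch after Example 1): chaining log after exp is the identity, so the gradient reaching the input should be 1 everywhere.

x = np.array([0.5, 1.0, 2.0])
t = Tensor(x, requires_grad=True)
y = log(exp(t))  # log(exp(x)) == x element-wise
y.backward(np.ones_like(y.data))
print(np.allclose(y.data, x), np.allclose(t.grad, np.ones_like(x)))  # True True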
Example 4
def leaky_relu(tensor: Tensor, negative_slope: float = 0.2) -> Tensor:
    """
    Function implements the leaky-relu function in autograd
    :param tensor: (Tensor) Input tensor
    :param negative_slope: (float) Negative slope of leaky-relu
    :return: (Tensor) Output Tensor
    """
    # Apply leaky-relu
    output = np.maximum(tensor.data, negative_slope * tensor.data)
    # Check if gradient is needed
    requires_grad = tensor.requires_grad
    # Make backward function if needed
    if requires_grad:
        def grad_relu(grad: np.ndarray) -> np.ndarray:
            """
            Function computes the gradient of the leaky-relu function
            :param grad: (np.ndarray) Previous gradient
            :return: (np.ndarray) Gradient
            """
            return grad * np.where(output > 0.0, 1.0, negative_slope)

        dependency = [Dependency(activation=tensor, grad_fn=grad_relu)]
    else:
        dependency = None

    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependency)
Example 5
def sigmoid(tensor: Tensor) -> Tensor:
    """
    Function implements the sigmoid function in autograd
    :param tensor: (Tensor) Input tensor
    :return: (Tensor) Output tensor
    """
    # Apply sigmoid
    output = 1 / (1 + np.exp(- tensor.data))
    # Check if gradient is needed
    requires_grad = tensor.requires_grad
    # Make backward function if needed
    if requires_grad:
        def grad_sigmoid(grad: np.ndarray) -> np.ndarray:
            """
            Function computes gradient of the sigmoid function
            :param grad: (np.ndarray) Previous gradient
            :return: (np.ndarray) Gradient
            """
            return grad * (output * (1 - output))

        dependency = [Dependency(activation=tensor, grad_fn=grad_sigmoid)]
    else:
        dependency = None

    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependency)
Example 6
def elu(tensor: Tensor, alpha: float = 1.) -> Tensor:
    """
    Function implements the elu function in autograd
    :param tensor: (Tensor) Input tensor
    :param alpha: (float) Alpha parameter of exponential slope
    :return: (Tensor) Output Tensor
    """
    # Apply elu
    output = np.where(tensor.data > 0.0, tensor.data, alpha * (np.exp(tensor.data) - 1))
    # Check if gradient is needed
    requires_grad = tensor.requires_grad
    # Make backward function if needed
    if requires_grad:
        def grad_elu(grad: np.ndarray) -> np.ndarray:
            """
            Function computes the gradient of the elu function
            :param grad: (np.ndarray) Previous gradient
            :return: (np.ndarray) Gradient
            """
            return grad * (np.where(tensor.data > 0.0, 1.0, alpha * np.exp(tensor.data)))

        dependency = [Dependency(activation=tensor, grad_fn=grad_elu)]
    else:
        dependency = None

    return Tensor(data=output, requires_grad=requires_grad, dependencies=dependency)
Example 7
def sqrt(tensor: Tensor) -> Tensor:
    data = np.sqrt(tensor.data)
    requires_grad = tensor.requires_grad

    if requires_grad:

        def grad_fn(grad: np.ndarray) -> np.ndarray:
            # d/dx sqrt(x) = 0.5 / sqrt(x); the small epsilon avoids division by zero at x = 0
            return grad * (0.5 * (1 / (data + 1e-18)))

        last_op = [Dependency(tensor, grad_fn)]
    else:
        last_op = []
    return Tensor(data, requires_grad, last_op)
Example 8
def tanh(tensor: Tensor) -> Tensor:
    data = np.tanh(tensor.data)
    requires_grad = tensor.requires_grad
    if requires_grad:

        def grad_fn(grad: np.ndarray) -> np.ndarray:
            # d/dx tanh(x) = 1 - tanh(x)^2, reusing the cached output
            return grad * (1 - data * data)

        depends_on = [Dependency(tensor, grad_fn)]
    else:
        depends_on = []

    return Tensor(data, requires_grad, depends_on)
Example 9
def relu(tensor: Tensor) -> Tensor:
    data = np.maximum(0, tensor.data)
    requires_grad = tensor.requires_grad

    if requires_grad:

        def grad_fn(grad: np.ndarray) -> np.ndarray:
            # ReLU passes the gradient only where the input is positive
            return grad * np.where(tensor.data > 0.0, 1.0, 0.0)

        depends_on = [Dependency(tensor, grad_fn)]
    else:
        depends_on = []

    return Tensor(data, requires_grad, depends_on)
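A quick check of the corrected backward pass (assuming the hypothetical Tensor sketch after Example 1):

x = Tensor(np.array([-2.0, -0.5, 0.5, 2.0]), requires_grad=True)
y = relu(x)
y.backward(np.ones_like(y.data))
print(x.grad)  # [0. 0. 1. 1.]: the gradient passes only where the input is positive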
Example 10
def sigmoid(tensor: Tensor) -> Tensor:
    data = 1 / (1 + np.exp(-tensor.data))
    requires_grad = tensor.requires_grad

    if requires_grad:

        def grad_fn(grad: np.ndarray) -> np.ndarray:
            # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), reusing the cached output
            return grad * (1 - data) * data

        depends_on = [Dependency(tensor, grad_fn)]
    else:
        depends_on = []

    return Tensor(data, requires_grad, depends_on)
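As a final sanity check, each analytic backward pass can be compared against a central finite difference (a sketch, again assuming the hypothetical Tensor core after Example 1; numeric_grad is an illustrative helper, not part of the original library):

def numeric_grad(fn, x: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    # Central finite difference of an element-wise function, one element at a time
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=["multi_index"])
    for _ in it:
        idx = it.multi_index
        plus, minus = x.copy(), x.copy()
        plus[idx] += eps
        minus[idx] -= eps
        grad[idx] = (fn(plus).sum() - fn(minus).sum()) / (2.0 * eps)
    return grad


x = np.random.randn(4)
t = Tensor(x, requires_grad=True)
out = sigmoid(t)
out.backward(np.ones_like(out.data))
assert np.allclose(t.grad, numeric_grad(lambda a: 1 / (1 + np.exp(-a)), x), atol=1e-4)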