Example No. 1
 def __init__(self, L=1.0, k=1.0, x0=0):
     self.L = get_right_shape(L)
     self.k = get_right_shape(k)
     self.x0 = get_right_shape(x0)
     if not isinstance(self.L, float) or not isinstance(
             self.k, float) or not isinstance(self.x0, float):
         raise ValueError("Error input for logistic function!")
Example No. 2
def test_reshape_array_types():
    # ============================================
    # Test whether we can reshape a misspecified array into the desired dimension/type
    # ============================================
    with pytest.raises(TypeError):
        # array with non-np.float64 entries
        x = [1, 'Hey']
        utils.get_right_shape(np.array(x))
Example No. 3
 def __rmul__(self, other):
     """Implements multiplication between other objects and Variables.
         See __add__ for reference.
     """
     out_val = np.dot(self.val, get_right_shape(other))
     res = ReverseVariable(out_val)
     self.children.append(res)
     res.left = self
     res.leftgrad = get_right_shape(other)
     return res
Example No. 4
 def __eq__(self, other):
     #TODO-DOC about this
     if isinstance(other, Variable):
         return close(self.val, other.val) and close(self.grad, other.grad)
     else:
         new_val = get_right_shape(other)
         return close(self.val, new_val) and close(self.grad, get_right_shape(np.zeros(self.grad.shape)))
Example No. 5
 def __pow__(self, other):
     """Implements exponentiation between Variables and other objects,
         which are numeric values.
 
     INPUTS
     =======
     other: Float, or int.
         The power to which we are exponentiating our Variable.
 
     RETURNS
     ========
     Variable: A new variable whose val and grad are those resulting
         from the exponentiation of our Variable and other.
 
     EXAMPLES
     =========
     >>> x = Variable(2.0)
     >>> z = x ** 3
     >>> z.val
     8.0
     >>> z.grad
     12.0
     """
     if isinstance(other, Variable):
         if not isinstance(other.val, float):
             raise ValueError("Exponent cannot be a vector!")
         if not isinstance(self.val, float):
             raise ValueError("Base as a vector not supported!")
         out_val = self.val ** other.val
         out_grad = np.dot(out_val, (np.dot(other.grad, np.log(self.val)) + np.dot(other.val / self.val, self.grad)))
         return Variable(val=out_val, grad=out_grad)
     else:
         new_val = get_right_shape(other)
         if not isinstance(new_val, float):
             raise ValueError("Exponent cannot be a vector!")
         if isinstance(self.val, float):
             out_val = self.val ** new_val
             out_grad = np.dot(np.dot(new_val, (self.val ** (new_val - 1))), self.grad)
         else:
             out_val = []
             for i in range(self.val.shape[0]):
                 out_val.append(self.val[i, 0] ** new_val)
             out_val = get_right_shape(out_val)
             height = self.grad.shape[0]
             width = self.grad.shape[1]
             o_grad = np.zeros(self.grad.shape)
             for i in range(height):
                 for j in range(width):
                     o_grad[i, j] = np.dot(np.dot(new_val, (self.val[i, 0] ** (new_val - 1))), self.grad[i, j])
             out_grad = o_grad
         return Variable(val=out_val, grad=out_grad)
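A minimal usage sketch of the vector branch above, under the assumption that Variable is importable from this package and seeded as in its constructor shown elsewhere on this page:

# Hypothetical sketch; `Variable` is assumed to be importable from this package.
X = Variable([2.0, 3.0])   # column-vector value with an identity seed gradient
Z = X ** 2                 # elementwise power via the vector branch above
# Expected: Z.val is the column vector [[4.], [9.]]
# Expected: Z.grad has 2 * x_i on the diagonal: [[4., 0.], [0., 6.]]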
Example No. 6
 def __truediv__(self, other):
     """Implements division between Variables.
         See __add__ for reference.
     """
     if isinstance(other, ReverseVariable):
         if not isinstance(other.val, float):
             raise ValueError("Vector cannot be the denominator!")
         if close(other.val, 0):
             raise ValueError("Divided by 0!")
         out_val = self.val / other.val
         res = ReverseVariable(out_val)
         self.children.append(res)
         other.children.append(res)
         res.left = self
         res.leftgrad = 1.0 / other.val
         res.right = other
         res.rightgrad = -self.val / (other.val ** 2)
         return res
     else:
         new_val = get_right_shape(other)
         if not isinstance(new_val, float):
             raise ValueError("Vector cannot be the denominator!")
         if close(new_val, 0):
             raise ValueError("Divided by 0!")
         out_val = self.val / new_val
         res = ReverseVariable(out_val)
         self.children.append(res)
         res.left = self
         res.leftgrad = 1.0 / new_val
         return res
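A hedged usage sketch of the reverse-mode division above, assuming ReverseVariable and its reverse() method behave as in the __add__ docstring shown elsewhere on this page:

# Hypothetical sketch; `ReverseVariable` is assumed to be importable from this package.
x = ReverseVariable(6.0)
y = ReverseVariable(2.0)
z = x / y                  # z.val == 3.0
z.reverse()                # propagate gradients back through the graph
# Expected: x.grad == 1 / y.val == 0.5
# Expected: y.grad == -x.val / y.val ** 2 == -1.5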
Example No. 7
 def __rpow__(self, other):
     """Implements exponentiation between other objects, which are
         numeric values, and variables.
 
     INPUTS
     =======
     other: Float, or int.
         The base, which we are raising to the power of our Variable.
 
     RETURNS
     ========
     Variable: A new variable whose val and grad are those resulting
         from the exponentiation of other and Variable.
 
     EXAMPLES
     =========
     >>> x = Variable(2.0)
     >>> z = 3 ** x
     >>> z.val
     9.0
     >>> z.grad
     9.887510598012987
     """
     new_val = get_right_shape(other)
     # Change later for vector variables
     # if new_val <= 0:
         # raise ValueError("Power base cannot be smaller than 0!")
     if not isinstance(self.val, float):
         raise ValueError("Exponent canont be a vector!")
     out_val = new_val ** self.val
     # def _pow(a):
         # return np.log(new_val) * (new_val ** self.val) * a
     # out_grad = self.single_grad(_pow, self.grad)
     out_grad = np.dot(np.dot(np.log(new_val), (new_val ** self.val)), self.grad)
     return Variable(val=out_val, grad=out_grad) 
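A quick, self-contained numerical check of the gradient rule used above, d/dx (a ** x) = a ** x * ln(a):

import numpy as np

a, x = 3.0, 2.0
analytic = a ** x * np.log(a)                    # ~9.8875, matching the docstring example
numeric = (a ** (x + 1e-6) - a ** x) / 1e-6      # finite-difference approximation
assert abs(analytic - numeric) < 1e-4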
Example No. 8
 def __truediv__(self, other):
     """Implements division between Variables and other objects.
         See __mul__ for reference.
     """
     #Multi-dim: should be np.dot
     #make_sure_shape(self,other)
     #TODO-1: Make sure the other element is non-zero, Write utils.
     #TODO-2: Extension to vector/multi-dim
     if isinstance(other, Variable):
         if not isinstance(other.val, float):
             raise ValueError("Vector cannot be the denominator")
         if abs(other.val) < 1e-4:
             raise ValueError("Divided by 0!") 
         out_val = self.val / other.val
         # def _div(a, b):
             # return (a * other.val - self.val * b) / (other.val ** 2)
         # out_grad = self.merge_grad(_div, self.grad, other.grad)
         out_grad = (np.dot(self.grad, other.val) - np.dot(self.val, other.grad)) / (other.val ** 2)
         return Variable(val=out_val, grad=out_grad)
     else: 
         new_val = get_right_shape(other)
         if not isinstance(new_val, float):
             raise ValueError("Vector cannot be the denominator")
         if abs(new_val) < 1e-4:
             raise ValueError("Divided by 0!")
         out_val = self.val / new_val
         # def _div(a):
             # return a / new_val
         # out_grad = self.single_grad(_div, self.grad)
         out_grad = self.grad / new_val
         return Variable(val=out_val, grad=out_grad)
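A hedged usage sketch of the scalar branch above, assuming Variable is importable from this package:

# Hypothetical sketch; `Variable` is assumed to be importable from this package.
x = Variable(3.0)          # seed gradient 1.0
z = x / 2
# Expected: z.val == 1.5 and z.grad == 0.5 (self.grad / new_val)
w = 2 / x                  # dispatches to __rtruediv__, shown further down this page
# Expected: w.grad == -2 / 3.0 ** 2, approximately -0.222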
Example No. 9
def test_get_right_shape_result():
    # ============================================
    # Test that get_right_shape reshapes list, tuple, and array inputs
    # into the expected column-vector form
    # ============================================
    correct_x = np.array([33., 2.], dtype=np.float64)
    ans = np.asarray([[33.], [2.]])
    #List
    assert (utils.get_right_shape([33, 2]) == ans).all()
    #Tuple
    assert (utils.get_right_shape((33, 2)) == ans).all()
    #List of lists, misspecified
    assert (utils.get_right_shape([[33], [2]]) == ans).all()
    #1-d matrix
    assert (utils.get_right_shape(np.array([33, 2])) == ans).all()
    #Vector
    assert (utils.get_right_shape(np.array([[33], [2]])) == ans).all()
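The utility under test is not shown on this page. A minimal sketch consistent with the expectations above and with the type tests further down might look like the following (the real utils.get_right_shape may differ in details):

import numpy as np

def get_right_shape_sketch(x):
    # Hypothetical reimplementation for illustration only.
    if isinstance(x, bool):                          # bools are rejected (see the type tests below)
        raise TypeError("bool is not a valid input")
    if isinstance(x, (int, float)):
        return float(x)                              # scalars become Python floats
    if not isinstance(x, (list, tuple, np.ndarray)):
        raise TypeError("Unsupported input type: %s" % type(x))
    arr = np.asarray(x)
    if not np.issubdtype(arr.dtype, np.number):      # e.g. np.array([1, 'Hey'])
        raise TypeError("All entries must be numeric")
    arr = arr.astype(np.float64).reshape(-1, 1)      # column vector, matching `ans` above
    return float(arr[0, 0]) if arr.shape[0] == 1 else arr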
Example No. 10
 def __radd__(self, other):
     """Implements addition between other objects and Variables.
         See __add__ for reference.
     """
     new_val = get_right_shape(other)
     out_val = self.val + new_val
     out_grad = self.grad
     return Variable(val=out_val, grad=out_grad)
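A hedged usage sketch of __radd__ above, assuming Variable is importable from this package:

# Hypothetical sketch; `Variable` is assumed to be importable from this package.
x = Variable(2.0)
z = 3 + x                  # dispatches to __radd__ above
# Expected: z.val == 5.0 and z.grad == 1.0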
Example No. 11
 def __init__(self, base=None):
     if base is not None:
         self.base = get_right_shape(base)
         if not isinstance(self.base, float):
             raise ValueError("Not a valid base!")
         if self.base <= 0:
             raise ValueError("Not a valid base!")
     else:
         self.base = None
Example No. 12
 def __rsub__(self, other):
     """Implements subtraction between other objects and Variables.
         See __sub__ for reference.
     """
     out_val = get_right_shape(other) - self.val
     # def _neg(a):
         # return -a
     # out_grad = self.single_grad(_neg, self.grad)
     out_grad = -self.grad
     return Variable(val=out_val, grad=out_grad)
Example No. 13
 def __rsub__(self, other):
     """Implements substraction between other objects and Variables.
         See __add__ for reference.
     """
     out_val = get_right_shape(other) - self.val
     res = ReverseVariable(out_val)
     self.children.append(res)
     res.left = self
     res.leftgrad = -1
     return res
Example No. 14
 def __rmul__(self, other):
     """Implements multiplication between other objects and Variables.
         See __mul__ for reference.
     """
     new_val = get_right_shape(other)
     out_val = np.dot(new_val, self.val)
     # def _mul(a):
         # return new_val * a
     # out_grad = self.single_grad(_mul, self.grad)
     out_grad = np.dot(self.grad, new_val) 
     return Variable(val=out_val, grad=out_grad)
Example No. 15
    def __init__(self, val, grad=None): 
        """
        Variables are initialized with a value and a gradient.

        INPUTS
        =======
        val: float, int, 1-D tuple, or 1-D list, required.
            Is the value of the variable. Currently handles numeric and
            1-D types, but will be extended to take multidimensional input
            in the near future.

        grad: float or int, optional. Default value is 1 (the seed).
            Is the gradient of the variable.

        EXAMPLES
        =========
        >>> x = Variable(2.0)
        >>> x.val
        2.0
        >>> x = Variable((2))
        >>> x.val
        2.0
        >>> x = Variable(np.array([2, 3]))
        >>> x.val
        array([[2.],
               [3.]])
        """

        # Assure val and grad are correct shape (in preparation for
        # multivariate implementation)
        self.val = get_right_shape(val)
        #We now assume that grad is an n-dimensional element, where n=len(val).
        if grad is None: #if created from scratch.
            if isinstance(self.val, float):
                self.grad = 1.0
            else:
                self.grad = np.eye(self.val.shape[0])
        else:
            #If not created from scratch, assumes we already have a gradient under the right form.
            self.grad = get_right_shape(grad)
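A hedged quick check of the default seed gradients described in the constructor above:

# Hypothetical sketch; `Variable` is assumed to be importable from this package.
x = Variable(2.0)
# Expected: x.grad == 1.0            (scalar input gets a scalar seed of 1.0)
X = Variable([2, 3])
# Expected: X.grad equals np.eye(2)  (vector input gets an identity-matrix seed)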
Example No. 16
 def __mul__(self, other):
     """Implements multiplication between Variables.
         See __add__ for reference.
     """
     if isinstance(other, ReverseVariable):
         out_val = np.dot(self.val, other.val)
         res = ReverseVariable(out_val)
         self.children.append(res)
         other.children.append(res)
         res.left = self
         res.leftgrad = other.val
         res.right = other
         res.rightgrad = self.val
         return res
         # out_grad = get_right_shape([other.val, self.val])
         # children = [self, other]
     else:
         out_val = np.dot(self.val, get_right_shape(other))
         res = ReverseVariable(out_val)
         self.children.append(res)
         res.left = self
         res.leftgrad = get_right_shape(other)
         return res
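A hedged usage sketch of the reverse-mode multiplication above, assuming reverse() behaves as in the __add__ docstring shown elsewhere on this page:

# Hypothetical sketch; `ReverseVariable` is assumed to be importable from this package.
x = ReverseVariable(3.0)
y = ReverseVariable(4.0)
z = x * y                  # z.val == 12.0
z.reverse()
# Expected: x.grad == y.val == 4.0
# Expected: y.grad == x.val == 3.0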
Example No. 17
 def __rtruediv__(self, other):
     """Implements division between other objects and Variables.
         See __div__ for reference.
     """
     new_val = get_right_shape(other)
     if not isinstance(self.val, float):
         raise ValueError("Vector cannot be the denominator")
     if abs(self.val) < 1e-4:
         raise ValueError("Divided by 0!")
     out_val = new_val / self.val
     # def _div(a):
         # return -new_val * a / (self.val ** 2)
     # out_grad = self.single_grad(_div, self.grad)
     out_grad = -np.dot(new_val, self.grad) / (self.val ** 2)
     return Variable(val=out_val, grad=out_grad)
Example No. 18
 def __rpow__(self, other):
     """Implements power between other objects and Variables.
         See __add__ for reference.
     """
     new_val = get_right_shape(other)
     # if new_val <= 0:
         # raise ValueError("Power base cannot be smaller than 0!")
     if not isinstance(self.val, float):
         raise ValueError("The exponent canont be a multi-dimension vector!")
     out_val = new_val ** self.val
     res = ReverseVariable(out_val)
     self.children.append(res)
     res.left = self
     res.leftgrad = np.dot(np.log(new_val), (new_val ** self.val))
     return res
Example No. 19
 def __rtruediv__(self, other):
     """Implements division between other objects and Variables.
         See __add__ for reference.
     """
     new_val = get_right_shape(other)
     if not isinstance(self.val, float):
         raise ValueError("Vector cannot be the denominator!")
     if close(self.val, 0):
             raise ValueError("Divided by 0!")
     out_val = new_val / self.val
     res = ReverseVariable(out_val)
     self.children.append(res)
     res.left = self
     res.leftgrad = -new_val / (self.val ** 2)
     return res
Example No. 20
 def __sub__(self, other):
     """Implements subtraction between Variables and other objects.
         See __add__ for reference.
     """
     if isinstance(other, Variable):
         out_val = self.val - other.val
         # def _sub(a, b):
             # return a - b
         # out_grad = self.merge_grad(_sub, self.grad, other.grad)
         out_grad = self.grad - other.grad
         return Variable(val=out_val, grad=out_grad)
     else:
         new_val = get_right_shape(other)
         out_val = self.val - new_val
         out_grad = self.grad
         return Variable(val=out_val, grad=out_grad)
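A hedged usage sketch of the forward-mode subtraction above, assuming Variable is importable from this package:

# Hypothetical sketch; `Variable` is assumed to be importable from this package.
x = Variable(5.0)
z = x - 2
# Expected: z.val == 3.0 and z.grad == 1.0
w = 2 - x                  # dispatches to __rsub__, shown elsewhere on this page
# Expected: w.val == -3.0 and w.grad == -1.0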
Example No. 21
 def __add__(self, other):
     """Implements addition between Variables and other objects, 
         which are either Variables or numeric values. 
 
     INPUTS
     =======
     other: ReverseVariable, float, or int.
         The object we are adding to our ReverseVariable.
 
     RETURNS
     ========
     ReverseVariable: A new ReverseVariable whose val is the sum of our
         variable and other; its grad is populated once reverse() is called.
 
     EXAMPLES
     =========
     >>> x = ReverseVariable(4)
     >>> y = ReverseVariable(2)
     >>> z = x + y
     >>> z.val
     6.0
     >>> z.reverse()
     >>> x.grad
     1.0
     >>> y.grad
     1.0
     """
     if isinstance(other, ReverseVariable):
         out_val = self.val + other.val
         res = ReverseVariable(out_val)
         self.children.append(res)
         other.children.append(res)
         res.left = self
         res.leftgrad = 1.0
         res.right = other
         res.rightgrad = 1.0
         return res
         # out_grad = get_right_shape([1., 1.])
         # children = [self, other]
         # return ReverseVariable(out_val, out_grad, children=children)
     else:
         out_val = self.val + get_right_shape(other)
         res = ReverseVariable(out_val)
         self.children.append(res)
         res.left = self
         res.leftgrad = 1.0
         return res
Example No. 22
def test_get_right_shape_types():
    # ============================================
    # Test that get_right_shape rejects unsupported input types (bool, dict)
    # ============================================
    #Bool
    with pytest.raises(TypeError):
        utils.get_right_shape(True)
    with pytest.raises(TypeError):
        utils.get_right_shape(False)
    # Dict
    with pytest.raises(TypeError):
        utils.get_right_shape({'x':3})
Example No. 23
 def __mul__(self, other):
     """Implements multiplication between Variables and other objects,
         which are either Variables or numeric values.
 
     INPUTS
     =======
     other: Variable, float, int, or vector.
         The object with which we are multiplying our Variable.
         It will be either a vector times a scalar or a scalar times a vector.
         If you want matrix multiplication, please use the Dot function.
 
     RETURNS
     ========
     Variable: A new variable whose val and grad are those resulting
         from the multiplication of our Variable and other.
 
     EXAMPLES
     =========
     >>> X = Variable([2, 4])
     >>> var_list = X.unroll()
     >>> x = var_list[0]
     >>> y = var_list[1]
     >>> z = x * y
     >>> z.val
     8.0
     >>> z.grad
     array([[4., 2.]])
     """
     if isinstance(other, Variable):
         out_val = np.dot(self.val, other.val)
         # def _mul(x, y):
             # return x * other.val + self.val * y
         # out_grad = self.merge_grad(_mul, self.grad, other.grad)
         out_grad = np.dot(self.grad, other.val) + np.dot(self.val, other.grad)
         return Variable(val=out_val, grad=out_grad)
     else:
         new_val = get_right_shape(other)
         out_val = np.dot(self.val, new_val)
         # def _mul(a):
             # return a * new_val
         # out_grad = self.single_grad(_mul, self.grad)
         out_grad = np.dot(self.grad, new_val)
         return Variable(val=out_val, grad=out_grad)
Example No. 24
 def __sub__(self, other):
     """Implements substraction between Variables.
         See __add__ for reference.
     """
     if isinstance(other, ReverseVariable):
         out_val = self.val - other.val
         res = ReverseVariable(out_val)
         self.children.append(res)
         other.children.append(res)
         res.left = self
         res.leftgrad = 1
         res.right = other
         res.rightgrad = -1
         return res
     else:
         out_val = self.val - get_right_shape(other)
         res = ReverseVariable(out_val)
         self.children.append(res)
         res.left = self
         res.leftgrad = 1
         return res
Example No. 25
 def __pow__(self, other):
     """Implements power between Variables.
         See __add__ for reference.
     """
     if isinstance(other, ReverseVariable):
         if not isinstance(other.val, float):
             raise ValueError("Exponent not a number")
         if not isinstance(self.val, float):
             raise ValueError("Base a vector not supported")
         out_val = self.val ** other.val
         res = ReverseVariable(out_val)
         self.children.append(res)
         other.children.append(res)
         res.left = self
         res.leftgrad = np.dot(other.val, (self.val ** (other.val - 1)))
         res.right = other
         res.rightgrad = np.dot(np.log(self.val), self.val ** other.val)
         return res
     else:
         new_val = get_right_shape(other)
         if not isinstance(new_val, float):
             raise ValueError("Exponent not a number")
         # if self.val <= 0:
             # raise ValueError("Power base cannot be smaller than 0!")
         if isinstance(self.val, float):
             out_val = self.val ** new_val
             res = ReverseVariable(out_val)
             self.children.append(res)
             res.left = self
             res.leftgrad = np.dot(new_val, (self.val ** (new_val - 1)))
             return res
         else:
             out_val = [val ** new_val for val in self.val]
             res = ReverseVariable(out_val)
             self.children.append(res)
             res.left = self
             res.leftgrad = np.zeros((self.val.shape[0], self.val.shape[0]))
             for i in range(self.val.shape[0]):
                 res.leftgrad[i, i] = np.dot(new_val, (self.val[i, 0] ** (new_val - 1)))
             return res
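A hedged usage sketch of the reverse-mode power above (with the right-hand gradient taking the log of the base), assuming reverse() behaves as in the __add__ docstring shown elsewhere on this page:

# Hypothetical sketch; `ReverseVariable` is assumed to be importable from this package.
x = ReverseVariable(2.0)
y = ReverseVariable(3.0)
z = x ** y                 # z.val == 8.0
z.reverse()
# Expected: x.grad == y.val * x.val ** (y.val - 1) == 12.0
# Expected: y.grad == log(x.val) * x.val ** y.val, i.e. 8 * ln(2), approximately 5.545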
Example No. 26
 def __add__(self, other):
     """Implements addition between Variables and other objects, 
         which are either Variables or numeric values. 
 
     INPUTS
     =======
     other: Variable, float, or int.
         The object with which we are adding our Variable.
 
     RETURNS
     ========
     Variable: A new variable whose val and grad are those resulting
         from the summation of our Variable and other.
 
     EXAMPLES
     =========
     >>> X = Variable([2, 4])
     >>> var_list = X.unroll()
     >>> x = var_list[0]
     >>> y = var_list[1]
     >>> z = x + y
     >>> z.val
     6.0
     >>> z.grad
     array([[1., 1.]])
     """
     if isinstance(other, Variable):
         out_val = self.val + other.val
         # def _add(a, b):
         #     return a + b
         # out_grad = self.merge_grad(_add, self.grad, other.grad)
         out_grad = self.grad + other.grad
         return Variable(val=out_val, grad=out_grad)
     else:
         new_val = get_right_shape(other)
         out_val = self.val + new_val
         out_grad = self.grad
         return Variable(val=out_val, grad=out_grad)
Example No. 27
    def __init__(self, val):
        """
        ReverseVariables are initialized with a value.

        INPUTS
        =======
        val: float, int, 1-D tuple, or 1-D list, required.
            Is the value of the variable. Currently handles numeric and
            1-D types, but will be extended to take multidimensional input
            in the near future.

        Unlike the forward-mode Variable, no gradient is supplied at
        construction; grad starts as None and is populated when reverse()
        is called on the output of a computation.

        EXAMPLES
        =========
        >>> x = ReverseVariable(2.0)
        >>> x.val
        2.0
        >>> x = ReverseVariable((2))
        >>> x.val
        2.0
        >>> x = ReverseVariable(np.array([2, 3]))
        >>> x.val
        array([[2.],
               [3.]])
        """

        self.children = []
        self.val = get_right_shape(val)
        self.grad = None
        self.left = None
        self.leftgrad = None
        self.right = None
        self.rightgrad = None
        self.tag = 0
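A hedged quick check of the initial state set up by the constructor above:

# Hypothetical sketch; `ReverseVariable` is assumed to be importable from this package.
x = ReverseVariable(2.0)
# Expected: x.val == 2.0 and x.grad is None; grad is only populated after
# reverse() is called on the output of a computation, as in the __add__
# docstring shown elsewhere on this page.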