# NOTE: the import paths and the `close` helper below are assumptions inferred
# from the identifiers used in these snippets (Variable, ReverseVariable, F,
# optim, close); adjust them to the actual package layout.
import numpy as np
import pytest

import autodiff.functions as F
import autodiff.optim as optim
from autodiff import Variable
from autodiff.reverse import ReverseVariable


def close(a, b):
    """Assumed helper: elementwise closeness of two arrays/scalars."""
    return np.isclose(a, b).all()


def test_create_variable_exception():
    with pytest.raises(TypeError):
        x = Variable(True)
    with pytest.raises(TypeError):
        x = Variable("haha")
    with pytest.raises(TypeError):
        x = Variable(2.2, True)
    with pytest.raises(TypeError):
        x = Variable(2.2, "haha")
def test_rpow_exception():
    X = Variable([1, 3, 3, 1, 2])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    with pytest.raises(TypeError):
        b = "g"**y
    with pytest.raises(ValueError):
        b = 2**a
def test_mul_exception():
    X = Variable([1, 2, 2, 1, 4])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    with pytest.raises(TypeError):
        b = a * "g"
    with pytest.raises(TypeError):
        b = False * y
def test_sub_exception():
    X = Variable([1, 2, 2, 1])
    var_list = X.unroll([2, 2])
    x = var_list[0]
    y = var_list[1]
    z = y + x
    a = z + [4.4, 3.3]
    b = a + z
    with pytest.raises(TypeError):
        c = b - "g"
    with pytest.raises(TypeError):
        c = True - b
def test_create_function_exception():
    with pytest.raises(NotImplementedError):
        f = F.Function()
        x = Variable([1, 2, 3])
        y = f(x)
    with pytest.raises(NotImplementedError):
        f = F.Function()
        x = Variable([1, 2, 3])
        y = f.get_grad(x)
    with pytest.raises(NotImplementedError):
        f = F.Function()
        x = Variable([1, 2, 3])
        y = f.get_val(x)
def test_neg():
    X = Variable([1, 3, 3, 1, 4])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    b = a / z
    c = -b
    assert close(c.val, -np.asarray([[1], [1]]))
    assert close(
        c.grad,
        np.asarray([[-0.25, 0, -0.25, 0, 0.25], [0, -0.25, 0, -0.25, 0.25]]))
def test_ew_exp():
    # 3->3
    X = Variable([1, 2, 3])
    out = F.ew_exp(X)
    real_values = np.exp(np.array([1, 2, 3])).reshape(-1, 1)
    real_grad = np.array([[np.exp(1), 0, 0], [0, np.exp(2), 0],
                          [0, 0, np.exp(3)]])
    assert (out.val == real_values).all()
    assert (out.grad == real_grad).all()
    # 1->1: scalar case
    X = Variable([1])
    out = F.ew_exp(X)
    real_values = np.exp(1.)
    real_grad = np.array(np.exp(1)).reshape(1, 1)
    assert close(out.val, real_values)
    assert close(out.grad, real_grad)
def test_add():
    X = Variable([1, 2, 2, 1])
    var_list = X.unroll([2, 2])
    x = var_list[0]
    y = var_list[1]
    z = x + y
    assert close(z.val, np.asarray([[3], [3]]))
    assert close(z.grad, np.asarray([[1, 0, 1, 0], [0, 1, 0, 1]]))
    a = z + [4.4, 3.3]
    b = a + z
    assert close(b.val, np.asarray([[10.4], [9.3]]))
    assert close(b.grad, np.asarray([[2, 0, 2, 0], [0, 2, 0, 2]]))
    c = [4.4, 3.3] + b
    assert close(c.val, np.asarray([[14.8], [12.6]]))
    assert close(c.grad, np.asarray([[2, 0, 2, 0], [0, 2, 0, 2]]))
def test_pow_exception():
    X = Variable([1, 3, 3, 1, 2])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    with pytest.raises(TypeError):
        b = a**"g"
    with pytest.raises(ValueError):
        b = a**[3, 3]
    with pytest.raises(ValueError):
        b = a**z
    with pytest.raises(ValueError):
        b = z**a
def test_eq():
    X = Variable([1, 3, 3, 1, 4])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    b = a + [4.4, 3.3]
    c = a - x
    assert c == y
    assert c != x
    c = b - a
    assert c == [4.4, 3.3]
    assert [4.4, 3.3] == c
    assert [1, 1] != c
    assert c != [1, 1]
def test_logistic():
    x = Variable(2)
    logi = F.Logistic()
    y = logi(x)
    sig = 1.0 / (1.0 + np.exp(-2))
    assert close(y.val, sig)
    assert close(y.grad, sig * (1.0 - sig))
def test_mul():
    X = Variable([1, 2, 2, 1, 4])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    b = a * z
    assert close(b.val, np.asarray([[12], [12]]))
    assert close(b.grad, np.asarray([[4, 0, 4, 0, 3], [0, 4, 0, 4, 3]]))
    c = b * 4
    assert close(c.val, np.asarray([[48], [48]]))
    assert close(c.grad, np.asarray([[16, 0, 16, 0, 12], [0, 16, 0, 16, 12]]))
    d = 2 * b
    assert close(d.val, np.asarray([[24], [24]]))
    assert close(d.grad, np.asarray([[8, 0, 8, 0, 6], [0, 8, 0, 8, 6]]))
def test_tanh():
    x = Variable(1)
    tanh = F.Tanh()
    y = tanh(x)
    assert close(y.val, np.tanh(1))
    # d/dx tanh(x) = 1 / cosh(x)^2 since cosh^2 - sinh^2 = 1
    assert close(y.grad, 1.0 / np.cosh(1)**2)
    def __call__(self, x):
        """Implements the chain rule.

        INPUTS
        =======
        x: autodiff.Variable holding a val and grad

        RETURNS
        ========
        autodiff.Variable: updated Variable after the chain rule was applied
        """
        if isinstance(x, Variable):
            out_val = self.get_val(x.val)
            out_grad = np.dot(self.get_grad(x.val), x.grad)
            return Variable(val=out_val, grad=out_grad)
        elif isinstance(x, ReverseVariable):
            out_val = self.get_val(x.val)
            res = ReverseVariable(out_val)
            x.children.append(res)
            res.left = x
            res.leftgrad = self.get_grad(x.val)
            return res
        else:
            raise ValueError("Not a variable!")
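# A minimal sketch (an assumption, not from the original source) of a concrete
# F.Function subclass consumed by the __call__ above: Square supplies get_val
# and get_grad, and __call__ chains its local Jacobian with x.grad.
class Square(F.Function):
    def get_val(self, val):
        # Elementwise square of the input values.
        return val**2

    def get_grad(self, val):
        # Local Jacobian of the elementwise square: diag(2 * val).
        return np.diag(2 * np.asarray(val, dtype=float).reshape(-1))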
def test_unroll_exception():
    X = Variable([1, 2, 3])
    with pytest.raises(TypeError):
        var_list = X.unroll(1)
    with pytest.raises(ValueError):
        var_list = X.unroll(['a'])
    with pytest.raises(ValueError):
        var_list = X.unroll([0, 1, 2])
    with pytest.raises(ValueError):
        var_list = X.unroll([2, 1, 1])
    y = Variable(1)
    with pytest.raises(ValueError):
        var_list = y.unroll([1, 1])
def test_sub():
    X = Variable([1, 2, 2, 1])
    var_list = X.unroll([2, 2])
    x = var_list[0]
    y = var_list[1]
    z = y + x
    a = z + [4.4, 3.3]
    b = a + z
    c = b - x
    assert close(c.val, np.asarray([[9.4], [7.3]]))
    assert close(c.grad, np.asarray([[1, 0, 2, 0], [0, 1, 0, 2]]))
    d = 3 - c
    assert close(d.val, np.asarray([[-6.4], [-4.3]]))
    assert close(d.grad, np.asarray([[-1, 0, -2, 0], [0, -1, 0, -2]]))
    e = c - 2.1
    assert close(e.val, np.asarray([[7.3], [5.2]]))
    assert close(e.grad, np.asarray([[1, 0, 2, 0], [0, 1, 0, 2]]))
def test_pow():
    X = Variable([1, 3, 3, 1, 2])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    b = z**2
    assert close(b.val, 4)
    assert close(b.grad, np.asarray([[0, 0, 0, 0, 4]]))
    b = a**2
    assert close(b.val, np.asarray([[16], [16]]))
    assert close(b.grad, np.asarray([[8, 0, 8, 0, 0], [0, 8, 0, 8, 0]]))
    c = z
    d = z**c  # z**z: d/dz z**z = z**z * (log(z) + 1)
    assert close(d.val, 4)
    assert close(d.grad, np.asarray([[0, 0, 0, 0, 4 * (np.log(2) + 1)]]))
def test_div():
    X = Variable([1, 3, 3, 1, 4])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    b = a / z
    assert close(b.val, np.asarray([[1], [1]]))
    assert close(
        b.grad,
        np.asarray([[0.25, 0, 0.25, 0, -0.25], [0, 0.25, 0, 0.25, -0.25]]))
    c = b / 0.25
    assert close(c.val, np.asarray([[4], [4]]))
    assert close(c.grad, np.asarray([[1, 0, 1, 0, -1], [0, 1, 0, 1, -1]]))
    d = 4 / z
    assert close(d.val, 1)
    assert close(d.grad, np.asarray([[0, 0, 0, 0, -0.25]]))
def test_log():
    x = Variable(2)
    log = F.Log()
    y = log(x)
    assert close(y.val, np.log(2)) and close(y.grad, 0.5)
    log2 = F.Log(2)
    y = log2(x)
    assert close(y.val, 1)
    assert close(y.grad, 0.5 / np.log(2))
def test_ew_sinus():
    # 3->3
    X = Variable([1, 2, 3])
    out = F.ew_sin(X)
    real_values = np.sin(np.array([1, 2, 3])).reshape(-1, 1)
    real_grad = np.array([[np.cos(1), 0, 0], [0, np.cos(2), 0],
                          [0, 0, np.cos(3)]])
    assert (out.val == real_values).all()
    assert (out.grad == real_grad).all()
def newtons_method(function, guess, epsilon):
    x = Variable(guess)
    f = function(x)
    i = 0
    # Newton update x_{n+1} = x_n - f(x_n) / f'(x_n), capped at 10000 steps.
    while abs(f.val) >= epsilon and i < 10000:
        x = x - f.val / f.grad
        f = function(x)
        print('Current x: {}'.format(x.val))
        i += 1
    print('The root of the function is: {}'.format(x.val))
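# Usage sketch (hypothetical, not from the original source): Newton's method
# on f(x) = x**2 - 2 converges to sqrt(2) from a nearby guess, using only the
# scalar Variable arithmetic exercised in the tests above.
def _newtons_method_sketch():
    newtons_method(lambda v: v**2 - 2, guess=1.5, epsilon=1e-8)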
def test_div_exception():
    X = Variable([1, 3, 3, 1, 0])
    var_list = X.unroll([2, 2, 1])
    x = var_list[0]
    y = var_list[1]
    z = var_list[2]
    a = x + y
    with pytest.raises(ValueError):
        b = a / x
    with pytest.raises(ValueError):
        b = a / z
    with pytest.raises(ValueError):
        b = 4.0 / z
    with pytest.raises(ValueError):
        b = a / 0.0
    with pytest.raises(ValueError):
        b = a / np.asarray([1, 1])
    with pytest.raises(ValueError):
        b = 4.0 / x
    with pytest.raises(TypeError):
        b = a / "g"
    with pytest.raises(TypeError):
        b = True / y
def test_concat_exception():
    X = Variable([1, 2, 3])
    Y = Variable([1, 2])
    _, _, x = X.unroll()
    _, y = Y.unroll()
    with pytest.raises(AssertionError):
        f = F.concat([x, y])
    with pytest.raises(AssertionError):
        f = F.concat([])
def test_concat_values_shapes():
    X = Variable([1, 2, 3])
    x, y, z = X.unroll()
    f1 = x + y
    f2 = x * y + z
    #=========================
    #Concatenate two scalars
    #=========================
    conc = F.concat([f1, f2])
    real_v = np.array([[3, 5]], dtype=np.float64).T
    real_gradients = np.array([[1, 1, 0], [2, 1, 1]], dtype=np.float64)
    assert (real_v == conc.val).all(), "Value or shape error for the value"
    assert (real_gradients == conc.grad
            ).all(), "Value or shape error for the gradient"
    #=========================
    #Concatenate scalar and vector
    #=========================
    new_conc = F.concat([f1, conc])
    real_v = np.array([[3, 3, 5]], dtype=np.float64).T
    real_gradients = np.array([[1, 1, 0], [1, 1, 0], [2, 1, 1]],
                              dtype=np.float64)
    assert (real_v == new_conc.val).all(), "Value or shape error for the value"
    assert (real_gradients == new_conc.grad
            ).all(), "Value or shape error for the gradient"
    #=========================
    #Concatenate vector and vector
    #=========================
    full_conc = F.concat([new_conc, conc])
    real_v = np.array([[3, 3, 5, 3, 5]], dtype=np.float64).T
    real_gradients = np.array(
        [[1, 1, 0], [1, 1, 0], [2, 1, 1], [1, 1, 0], [2, 1, 1]],
        dtype=np.float64)
    assert (
        real_v == full_conc.val).all(), "Value or shape error for the value"
    assert (real_gradients == full_conc.grad
            ).all(), "Value or shape error for the gradient"
def test_assertion_error():
    init_point = np.array([4, 5])
    init_point = Variable(init_point)

    def loss_fn(X):
        return X + 1

    with pytest.raises(AssertionError):
        gd = optim.Optimizer(0.01, 0.0001, loss_fn, init_point)
    with pytest.raises(AssertionError):
        gd = optim.Optimizer(-0.01, 0.0001, loss_fn, init_point)
    with pytest.raises(AssertionError):
        gd = optim.Optimizer(0.01, -0.0001, loss_fn, init_point)
    with pytest.raises(AssertionError):
        gd = optim.Optimizer(np.ndarray(2), 0.0001, loss_fn, init_point)
    with pytest.raises(AssertionError):
        gd = optim.Optimizer(2, np.array(0.0001), loss_fn, init_point)
def test_create_variable():
    X = Variable([1, 2, 3])
    assert close(X.val, np.asarray([[1], [2], [3]]))
    assert close(X.grad, np.eye(3))
    var_list = X.unroll()
    x = var_list[0]
    assert close(x.val, 1)
    assert close(x.grad, np.asarray([[1, 0, 0]]))
    var_list = X.unroll([2, 1])
    x = var_list[0]
    assert close(x.val, np.asarray([[1], [2]]))
    assert close(x.grad, np.asarray([[1, 0, 0], [0, 1, 0]]))
    y = Variable(1)
    var_list = y.unroll()
    x = var_list[0]
    assert close(x.val, 1)
    assert close(x.grad, 1)
    var_list = y.unroll([1])
    x = var_list[0]
    assert close(x.val, 1)
    assert close(x.grad, 1)
def test_input_exception():
    x = Variable([1, 1])
    with pytest.raises(ValueError):
        exp = F.Exponent()
        y = exp(x)
    with pytest.raises(ValueError):
        sin = F.Sinus()
        y = sin(x)
    with pytest.raises(ValueError):
        cos = F.Cosinus()
        y = cos(x)
    with pytest.raises(ValueError):
        tan = F.Tangent()
        y = tan(x)
    with pytest.raises(ValueError):
        arcsin = F.Arcsin()
        y = arcsin(x)
    with pytest.raises(ValueError):
        arccos = F.Arccos()
        y = arccos(x)
    with pytest.raises(ValueError):
        arctan = F.Arctan()
        y = arctan(x)
    with pytest.raises(ValueError):
        sinh = F.Sinh()
        y = sinh(x)
    with pytest.raises(ValueError):
        cosh = F.Cosh()
        y = cosh(x)
    with pytest.raises(ValueError):
        tanh = F.Tanh()
        y = tanh(x)
    with pytest.raises(ValueError):
        log = F.Log()
        y = log(x)
    with pytest.raises(ValueError):
        logi = F.Logistic()
        y = logi(x)
    with pytest.raises(ValueError):
        sqrt = F.Sqrt()
        y = sqrt(x)
    def minimize(self, nb_steps, keep_track=True):
        """
        keep_track is a bool -> True returns the different losses/points
        obtained during optimization.
        """
        trajectory = []
        losses = []
        it = 0
        # Initialize the loss above tol so that the loop runs at least once.
        loss = Variable(val=self.tol + 1)
        # The loss is assumed positive (see __init__), so loss.val > tol is a
        # meaningful stopping criterion.
        while it < nb_steps and loss.val > self.tol:
            loss = self._eval(self.current_point)
            # Note: self.current_point -= self.lr * loss.grad would create a
            # brand-new Variable; _step updates the current point instead
            # (a hedged sketch of _step follows this method).
            self._step(loss)
            # Keep track of the optimization path.
            trajectory.append(self.current_point.val)
            losses.append(loss.val)
            it += 1
        print('Minimized the function for {} steps.'.format(it))
        if keep_track:
            assert self.current_point.val.shape
            return self.current_point, losses, trajectory
        else:
            return self.current_point
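    # A hedged sketch (the real implementation is not shown in this excerpt)
    # of the _step referenced above, assuming plain gradient descent:
    # loss.grad has shape (1, N) while current_point.val has shape (N, 1),
    # hence the transpose.
    def _step(self, loss):
        self.current_point = Variable(self.current_point.val -
                                      self.lr * loss.grad.T)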
    def __init__(self, lr, tol, loss_fn, init_point):
        """
        init_point: numpy.array or autodiff.Variable
        loss_fn should be a function wrapped around the autodiff module
            loss_fn input: N-dimensional Variable
            loss_fn output: 1-dimensional Variable.
            Should be positive by definition of a loss function.
            Any type of function could be used.
        Remark: we enforce loss_fn to be 1-dimensional because the real
        numbers carry an implicit total order, so losses can be compared.
        lr: strictly positive float
        tol: strictly positive float - stops the iterations whenever loss_fn
        becomes strictly inferior to tol.
        """
        assert isinstance(lr, (int, float)) and not isinstance(
            lr, bool), "lr should be numeric type"
        assert isinstance(tol, (int, float)) and not isinstance(
            tol, bool), "tol should be numeric type"

        assert lr > 0., "Need a positive learning rate"
        assert tol > 0., "Need a positive tolerance"
        self.lr = lr
        self.tol = tol
        self.loss_fn = loss_fn
        # Check whether we will indeed be working with a Variable.
        try:
            self.current_point = Variable(init_point)
        except TypeError as e:
            if isinstance(init_point, Variable):
                self.current_point = init_point
            else:
                raise TypeError(e)
        # Check whether the loss_fn output lives in the right space.
        out = loss_fn(self.current_point)
        assert isinstance(out.val,
                          float), "The loss function should be scalar-output"
def concat(var_list: list):
    """
    If x, y are Variables, this lets the user define conc = F.concat([x, y]),
    which is a single multivariate Variable.
    Assume we have two Variables in R^2 and R^3 respectively. They are
    supposed to share the same input space, for instance R^10, so that their
    gradients have shapes (2, 10) and (3, 10).
    var_list has to be a list of Variables.
    """
    assert len(var_list) > 0, 'Can not concatenate an empty list'
    # The gradient shape of the first variable fixes the input dimension.
    input_dim = var_list[0].grad.shape[1]
    concat_val, concat_grad = [], []
    for var in var_list:
        assert var.grad.shape[1] == input_dim, \
            'trying to concatenate variables from a different input space'
        if isinstance(var.val, float):
            concat_val.append(np.array(var.val).reshape(-1, 1))
        else:  # We already have an array
            concat_val.append(var.val)
        concat_grad.append(var.grad)
    # Stack values into a single column vector and gradients row-wise.
    out_val = np.concatenate(concat_val)
    out_grad = np.concatenate(concat_grad, axis=0)
    return Variable(val=out_val, grad=out_grad)
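# Usage sketch (mirrors test_concat_values_shapes above): concatenating two
# scalar Variables from the same input space stacks their values into a
# column vector and their gradients row-wise.
def _concat_usage_sketch():
    x, y, z = Variable([1, 2, 3]).unroll()
    conc = concat([x + y, x * y + z])
    assert conc.val.shape == (2, 1)
    assert conc.grad.shape == (2, 3)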