def test_new_method_binary_two_names():
    def _multiply_then_halve(
            a: Union[int, float, np.number, np.ndarray],
            b: Union[int, float, np.number, np.ndarray]
    ) -> Union[int, float, np.number, np.ndarray]:
        return (a*b)/2
        
    assert np.all(_multiply_then_halve(np.ones(4), np.arange(4)) == np.array([0, 0.5, 1, 1.5]))
    
    with temp_variable_method_names('mth', 'multiple_and_then_take_half_after'):
        
        @Variable.new_method('mth', 'multiple_and_then_take_half_after')
        def multiply_then_halve(
                a: Union[int, float, np.number, np.ndarray],
                b: Union[int, float, np.number, np.ndarray]
        ) -> Union[Variable, int, float, np.number, np.ndarray]:
            a_is_var = isinstance(a, Variable)
            b_is_var = isinstance(b, Variable)
            if a_is_var and b_is_var:
                return Variable(_multiply_then_halve(a.data, b.data))
            elif a_is_var:
                return Variable(_multiply_then_halve(a.data, b))
            elif b_is_var:
                return Variable(_multiply_then_halve(a, b.data))
            else:
                return _multiply_then_halve(a, b)
        
        var_a = Variable(10)
        var_b = Variable(5)
        assert np.all(var_a.mth(var_b).data == 25)
        assert np.all(var_a.multiple_and_then_take_half_after(var_b).data == 25)
        assert 'multiply_then_halve' not in dir(var_a)
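
For context, a minimal sketch of what the two helpers exercised above might look like. This is an illustrative assumption rather than the library's actual implementation; it only supposes that Variable methods are plain class attributes:

import contextlib

@contextlib.contextmanager
def temp_variable_method_names(*names):
    # Hypothetical sketch: let the enclosed test register methods under the
    # given names, then strip them from Variable again on exit.
    try:
        yield
    finally:
        for name in names:
            if hasattr(Variable, name):
                delattr(Variable, name)

def new_method_sketch(cls, *names):
    # Hypothetical sketch of Variable.new_method (a classmethod on the real
    # class): register one function under several method names at once.
    def decorator(func):
        for name in names:
            setattr(cls, name, func)  # each name becomes a callable method
        return func
    return decorator
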
def test_new_method_unary_one_name():
    def _mult_ten(operand: Union[int, float, np.number, np.ndarray]) -> Union[int, float, np.number, np.ndarray]:
        return operand*10
        
    assert np.all(_mult_ten(np.ones(4)) == np.full([4], 10))
    
    with temp_variable_method_names('mult_ten_special_name'):
        
        @Variable.new_method('mult_ten_special_name')
        def mult_ten(operand: VariableOperand) -> Union[Variable, VariableOperand]:
            if isinstance(operand, Variable):
                return Variable(_mult_ten(operand.data))
            else:
                return _mult_ten(operand)
        
        # Verify 1-D arrays
        var = Variable(np.arange(3))
        assert np.all(var.mult_ten_special_name().data == np.array([00, 10, 20]))

        # Verify 2-D arrays
        var = Variable(np.arange(4).reshape([2,2]))
        assert np.all(var.mult_ten_special_name().data == np.array([[00, 10], [20, 30]]))

        # Verify 3-D arrays
        var = Variable(np.arange(8).reshape([2,2,2]))
        assert np.all(var.mult_ten_special_name().data == np.array([[[00, 10], [20, 30]], [[40, 50], [60, 70]]]))
Example #4
def test_variable_dot():
    a_array = np.arange(5)
    b_array = np.array([3, 8, 5, 6, 8])
    a = Variable(np.arange(5, dtype=float))
    b = Variable(np.array([3, 8, 5, 6, 8], dtype=float))
    expected_result = 68

    assert np.all(a_array == a.data)
    assert np.all(b_array == b.data)
    assert id(a_array) != id(a.data)
    assert id(b_array) != id(b.data)

    def validate_result(result, expected_type: type) -> None:
        assert result == expected_result
        assert isinstance(result, expected_type)
        return

    # Variable + Variable
    validate_result(a.dot(b), Variable)
    validate_result(np.dot(a, b), Variable)

    # numpy + numpy
    validate_result(np.dot(a_array, b_array), np.int64)
    validate_result(np.ndarray.dot(a_array, b_array), np.int64)
    validate_result(a_array.dot(b_array), np.int64)

    # Variable + numpy
    validate_result(a.dot(b_array), Variable)
    validate_result(np.dot(a, b_array), Variable)

    # numpy + Variable
    validate_result(np.dot(a_array, b), Variable)
    validate_result(np.ndarray.dot(a_array, b), Variable)
    validate_result(a_array.dot(b), Variable)

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    dot_product = a.dot(b)
    variable_to_gradient = sgd.take_training_step(dot_product)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == b_array)
    assert np.all(variable_to_gradient[b] == a_array)

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = 0
    sgd = autograd.optimizer.SGD(learning_rate=1e-4)
    for training_step_index in range(50):
        y_hat = x.dot(np.array([-10, 50]))
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if 0 < loss < 1e-3:
            break
        sgd.take_training_step(loss)
    assert 0 < loss < 1e-3
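
The gradient assertions above follow from d(a·b)/da = b and d(a·b)/db = a. A numpy-only finite-difference check of that identity, independent of the Variable class:

def _numeric_grad(f, x, eps=1e-6):
    # Forward-difference gradient of a scalar-valued f at x.
    grad = np.zeros_like(x)
    for i in range(x.size):
        bumped = x.copy()
        bumped.flat[i] += eps
        grad.flat[i] = (f(bumped) - f(x)) / eps
    return grad

a_check = np.arange(5, dtype=float)
b_check = np.array([3, 8, 5, 6, 8], dtype=float)
assert np.allclose(_numeric_grad(lambda v: v.dot(b_check), a_check), b_check)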
Example #5
def test_variable_log():
    a_array = np.array([1, 2])
    a = Variable(np.array([1, 2], dtype=float))
    expected_result_variable = Variable(np.array([0, 0.69314718]))
    expected_result_array = np.array([0, 0.69314718])

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert expected_result_variable.isclose(result).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.isclose(result, expected_result_array).all()
        assert isinstance(result, np.ndarray)
        return

    # Variable
    validate_variable_result(np.log(a))
    validate_variable_result(a.log())
    validate_variable_result(a.natural_log())

    # numpy
    validate_array_result(np.log(a_array))

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    log_result = a.log()
    variable_to_gradient = sgd.take_training_step(log_result)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == np.array([1, 0.5]))

    # Verify Trainability
    x = Variable(np.array([0.1, 0.2]))
    y = 1
    sgd = autograd.optimizer.SGD(learning_rate=1)
    for training_step_index in range(1_000):
        y_hat = x.log()
        diff = np.subtract(y, y_hat)
        loss = np.sum(diff**2)
        if training_step_index > 10 and loss.sum() < 1e-10:
            break
        sgd.take_training_step(loss)
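
The asserted gradient [1, 0.5] is simply 1/a evaluated at a = [1, 2], since d ln(x)/dx = 1/x; a quick numpy-only sanity check:

a_check = np.array([1.0, 2.0])
eps = 1e-7
assert np.allclose((np.log(a_check + eps) - np.log(a_check)) / eps,
                   1 / a_check, rtol=1e-4)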
Example #6
File: xor.py  Project: kraglik/surface
def main():

    model = Model()
    model.optim = optim.Adam(model)

    data = [(np.array([0, 0]), np.array(0)), (np.array([1, 0]), np.array(1)),
            (np.array([0, 1]), np.array(1)), (np.array([1, 1]), np.array(0))]

    for i in range(1000):
        train_data = random.choices(data, k=4)

        for input, target in train_data:
            model.optim.fit([Variable(input)], target)

    for input, target in data:
        prediction = model.forward(Variable(input)).data.tolist()

        print(f"{input.tolist()} -> {prediction} (target: {target.tolist()})")
Example #7
    def __init__(self, kernel_sizes, in_channels, out_channels):
        super(Conv2D, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels

        self.kernels = [
            Variable.random(*kernel_sizes) for _ in range(out_channels)
        ]
        self.biases = [
            Variable.random(*kernel_sizes) for _ in range(out_channels)
        ]
        self._parameters.update(
            {f"kernel_{i}": kernel
             for i, kernel in enumerate(self.kernels)})
        self._parameters.update(
            {f"bias_{i}": bias
             for i, bias in enumerate(self.biases)})
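
A hypothetical usage of the constructor above, assuming the surrounding Module machinery and Variable.random exist as the snippet implies:

conv = Conv2D(kernel_sizes=(3, 3), in_channels=1, out_channels=8)
assert len(conv.kernels) == len(conv.biases) == 8

Note that each bias is allocated with the same shape as its kernel; a per-output-channel scalar bias is the more common convention.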
Example #8
def test_division_equal_to_negative_power():
    # Variables
    initial_values = np.full(4, 1.0)
    x_1 = Variable(initial_values.copy())
    x_2 = Variable(initial_values.copy())

    # Optimizer
    learning_rate = 1e-6
    sgd_1 = autograd.optimizer.SGD(learning_rate)
    sgd_2 = autograd.optimizer.SGD(learning_rate)

    # Verify Results
    for _ in range(10_000):

        quotient_1 = Variable(np.full(4, 5, dtype=float)).divide(x_1)
        quotient_2 = Variable(np.full(4, 5, dtype=float)).multiply(x_2**-1)

        diff_1 = np.subtract(40, quotient_1)
        diff_2 = np.subtract(40, quotient_2)

        loss_1 = np.sum(diff_1**2)
        loss_2 = np.sum(diff_2 * diff_2)

        assert np.all(np.isclose(loss_1.data, loss_2.data))

        variable_to_gradient_1 = sgd_1.take_training_step(loss_1)
        variable_to_gradient_2 = sgd_2.take_training_step(loss_2)

        assert np.all(
            np.isclose(variable_to_gradient_1[x_1],
                       variable_to_gradient_2[x_2]))
        assert np.all(
            np.isclose(variable_to_gradient_1[quotient_1],
                       variable_to_gradient_2[quotient_2]))
        assert np.all(
            np.isclose(variable_to_gradient_1[diff_1],
                       variable_to_gradient_2[diff_2]))
        assert np.all(
            np.isclose(variable_to_gradient_1[loss_1],
                       variable_to_gradient_2[loss_2]))

        if loss_1 < 1e-6:
            break
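
Both formulations share the same derivative, d/dx (c * x**-1) = -c * x**-2, which is why every gradient table is asserted equal element-wise; a scalar finite-difference check:

c, x0, eps = 5.0, 2.0, 1e-7
assert np.isclose((c / (x0 + eps) - c / x0) / eps, -c / x0 ** 2, rtol=1e-4)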
Example #9
def test_variable_exp():
    a_array = np.array([1, 2])
    a = Variable(np.array([1, 2], dtype=float))
    expected_result_variable = Variable(
        np.array([2.718281828459045, 7.3890560989306495]))
    expected_result_array = np.array([2.718281828459045, 7.3890560989306495])

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert expected_result_variable.isclose(result).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.isclose(result, expected_result_array).all()
        assert isinstance(result, np.ndarray)
        return

    # Variable
    validate_variable_result(np.exp(a))
    validate_variable_result(a.exp())

    # numpy
    validate_array_result(np.exp(a_array))

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    exp_result = a.exp()
    variable_to_gradient = sgd.take_training_step(exp_result)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(
        np.isclose(variable_to_gradient[a], exp_result, rtol=1e-3, atol=1e-4))

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = 1
    sgd = autograd.optimizer.SGD(learning_rate=1)
    for training_step_index in range(1_000):
        y_hat = x.exp()
        diff = np.subtract(y, y_hat)
        loss = np.sum(diff**2)
        if training_step_index > 10 and loss.sum() < 1e-15:
            break
        sgd.take_training_step(loss)
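
The derivative check above leans on d exp(x)/dx = exp(x), i.e. the gradient equals the forward result itself; the same identity in plain numpy:

x_check = np.array([1.0, 2.0])
eps = 1e-7
assert np.allclose((np.exp(x_check + eps) - np.exp(x_check)) / eps,
                   np.exp(x_check), rtol=1e-4)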
Example #10
def test_variable_sum():
    a_array = np.arange(5)
    a = Variable(np.arange(5, dtype=float))
    expected_result_variable = Variable(10)
    expected_result_number = 10

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_number)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_number)

    def validate_variable_result(result) -> None:
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)
        return

    def validate_number_result(result) -> None:
        assert np.all(result == expected_result_number)
        float(result)  # raises if the result cannot be converted to a float
        assert isinstance(result, np.number)
        return

    # Variable
    validate_variable_result(a.sum())
    validate_variable_result(np.sum(a))

    # numpy
    validate_number_result(a_array.sum())
    validate_number_result(np.sum(a_array))

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    summation = a.sum()
    variable_to_gradient = sgd.take_training_step(summation)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == np.ones(a.shape))

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = 10
    sgd = autograd.optimizer.SGD(learning_rate=1e-1)
    for training_step_index in range(1_000):
        y_hat = x.sum()
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if training_step_index > 10 and loss.sum() < 1e-10:
            break
        sgd.take_training_step(loss)
Example #11
def test_variable_depended_on_variables():
    '''
    The computation graph is:

                  f
                 /|
                / |
               e  |
               |  |
               d  |
             / |  |
            |  |  |
            |  c  |
            | / \ |
            |/   \|
            a     b

    '''
    a = Variable(np.random.rand(8))
    b = Variable(np.random.rand(8))
    c = a.dot(b)
    d = a - c
    e = d ** 2
    f = b - e
    topologically_sorted_variables = list(f.depended_on_variables())
    assert len(topologically_sorted_variables) == len(set(topologically_sorted_variables)) == 6
    topologically_sorted_variable_ids = eager_map(id, topologically_sorted_variables)
    a_index = topologically_sorted_variable_ids.index(id(a))
    b_index = topologically_sorted_variable_ids.index(id(b))
    c_index = topologically_sorted_variable_ids.index(id(c))
    d_index = topologically_sorted_variable_ids.index(id(d))
    e_index = topologically_sorted_variable_ids.index(id(e))
    f_index = topologically_sorted_variable_ids.index(id(f))
    assert f_index < e_index
    assert f_index < b_index
    assert e_index < d_index
    assert d_index < a_index
    assert d_index < c_index
    assert c_index < a_index
    assert c_index < b_index
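
The order asserted above puts every variable before anything it depends on, i.e. a topological order of the docstring's graph from the root down. A minimal sketch of how such a traversal might be implemented, assuming a hypothetical accessor directly_depended_on_variables() that yields a variable's immediate inputs:

def depended_on_variables_sketch(root):
    seen, post_order = set(), []
    def visit(var):
        if id(var) in seen:
            return
        seen.add(id(var))
        for dep in var.directly_depended_on_variables():  # hypothetical accessor
            visit(dep)
        post_order.append(var)  # appended only after everything it depends on
    visit(root)
    return reversed(post_order)  # root first, leaves last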
Example #12
def test_variable_negative():
    a_array = np.array([1, 2])
    a = Variable(np.array([1, 2], dtype=float))
    expected_result_variable = Variable(np.array([-1.0, -2.0]))
    expected_result_array = np.array([-1, -2])

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert expected_result_variable.equal(result).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.equal(result, expected_result_array).all()
        assert isinstance(result, np.ndarray)
        return

    # Variable
    validate_variable_result(np.negative(a))
    validate_variable_result(a.negative())

    # numpy
    validate_array_result(np.negative(a_array))

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    negative_result = a.negative()
    variable_to_gradient = sgd.take_training_step(negative_result)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(np.equal(variable_to_gradient[a], np.full(a.shape, -1.0)))

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = 1
    sgd = autograd.optimizer.SGD(learning_rate=1e-1)
    for training_step_index in range(1_000):
        y_hat = x.negative()
        diff = np.subtract(y, y_hat)
        loss = np.sum(diff**2)
        if training_step_index > 10 and loss.sum() < 1e-15:
            break
        var2grad = sgd.take_training_step(loss)
Example #13
    def fit(self, loader: DataLoader, optimizer=None, loss_function=None) -> None:
        """
        Fits the model to the data.

        If no optimizer is passed in, the default optimizer is SGD.
        If no loss function is passed in, the default loss function is MSE.

        :returns: None; self.params are fit to the data.
        """
        if optimizer is None:
            optimizer = SGD(0.01)

        if loss_function is None:
            loss_function = mean_squared_error

        for X, y in loader:
            if self.params is None:
                self.params = Matrix([[Variable(random.random())] for _ in range(len(X[0]))])
                self.bias = Matrix([[Variable(random.random())]])

            output = self._evaluate(X)
            loss = loss_function(output, y)
            loss += self._regularize()
            self.params = optimizer.step(self.params, loss.get_grad(self.params))
            self.bias = optimizer.step(self.bias, loss.get_grad(self.bias))
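
A hypothetical call pattern for the method above; DataLoader, SGD, and mean_squared_error are names the snippet itself assumes to be in scope:

model.fit(loader)  # defaults to SGD(0.01) and mean_squared_error
model.fit(loader, optimizer=SGD(0.05))

Note that self.params is sized lazily from the width of the first batch, so the model needs no explicit input-dimension argument.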
Example #14
def test_variable_abs():
    a_array = np.array([0, -1, -2, 3])
    a = Variable(np.array([0, -1, -2, 3], dtype=float))
    expected_result_variable = Variable(np.arange(4))
    expected_result_array = np.arange(4)

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert expected_result_variable.eq(result).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.all(result == expected_result_array)
        assert isinstance(result, np.ndarray)
        return

    # Variable
    validate_variable_result(abs(a))
    validate_variable_result(a.abs())
    validate_variable_result(np.abs(a))

    # numpy
    validate_array_result(abs(a_array))
    validate_array_result(np.abs(a_array))

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    absolute_value = a.abs()
    variable_to_gradient = sgd.take_training_step(absolute_value)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == np.array([0, -1, -1, 1]))

    # Verify Trainability
    x = Variable(np.array([-2, -1, 0, 1, 2], dtype=float))
    sgd = autograd.optimizer.SGD(learning_rate=1e-1)
    for training_step_index in range(1_000):
        absolute_value = x.abs()
        if np.all(np.abs(absolute_value) < 1e-10):
            break
        sgd.take_training_step(absolute_value)
Example #15
from numpy.lib.stride_tricks import as_strided  # needed for the sliding-window view below


def conv2d(input: Variable, kernel: Variable) -> Variable:

    inp, ker = input.data, kernel.data

    if inp.ndim == 2:  # only the 2-D single-channel case is handled

        s = kernel.data.shape + tuple(np.subtract(inp.shape, ker.shape) + 1)

        data = np.einsum(
            'ij,ijkl->kl', kernel.data,
            as_strided(input.data, shape=s, strides=input.data.strides * 2))

        return input.binary_op(kernel, data,
                               (kernel.data, lambda g, w: deconv2d(g, w)),
                               (kernel.data, lambda g, w: w * g.mean()))
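
The as_strided/einsum pairing above is the standard trick for a 'valid' 2-D cross-correlation: as_strided exposes every kernel-sized window of the input as a 4-D view, and einsum contracts each window against the kernel. A self-contained numpy illustration of the same computation (variable names here are illustrative):

import numpy as np
from numpy.lib.stride_tricks import as_strided

img = np.arange(16, dtype=float).reshape(4, 4)
k = np.ones((2, 2))
win_shape = k.shape + tuple(np.subtract(img.shape, k.shape) + 1)  # (2, 2, 3, 3)
windows = as_strided(img, shape=win_shape, strides=img.strides * 2)
out = np.einsum('ij,ijkl->kl', k, windows)
assert out.shape == (3, 3)
assert out[0, 0] == img[:2, :2].sum()  # top-left window contracted with ones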
Example #16
def test_squaring_equal_to_self_multiplication():
    # Variables
    x_1 = Variable(np.arange(4, dtype=float))
    x_2 = Variable(np.arange(4, dtype=float))
    x_3 = Variable(np.arange(4, dtype=float))

    # Optimizer
    learning_rate = 1e-3
    sgd_1 = autograd.optimizer.SGD(learning_rate)
    sgd_2 = autograd.optimizer.SGD(learning_rate)
    sgd_3 = autograd.optimizer.SGD(learning_rate)

    # Verify Results
    for _ in range(500):

        diff_1 = np.subtract(40, Variable(np.full(4, 5, dtype=float)).dot(x_1))
        diff_2 = np.subtract(40, Variable(np.full(4, 5, dtype=float)).dot(x_2))
        diff_3_a = np.subtract(40,
                               Variable(np.full(4, 5, dtype=float)).dot(x_3))
        diff_3_b = np.subtract(40,
                               Variable(np.full(4, 5, dtype=float)).dot(x_3))

        assert id(diff_3_a) != id(diff_3_b)

        loss_1 = diff_1**2
        loss_2 = diff_2 * diff_2
        loss_3 = diff_3_a * diff_3_b

        assert np.all(loss_1.data == loss_2.data)
        assert np.all(loss_2.data == loss_3.data)
        assert np.all(loss_3.data == loss_1.data)

        variable_to_gradient_1 = sgd_1.take_training_step(loss_1)
        variable_to_gradient_2 = sgd_2.take_training_step(loss_2)
        variable_to_gradient_3 = sgd_3.take_training_step(loss_3)

        assert np.all(
            variable_to_gradient_1[x_1] == variable_to_gradient_2[x_2])
        assert np.all(
            variable_to_gradient_2[x_2] == variable_to_gradient_3[x_3])
        assert np.all(
            variable_to_gradient_3[x_3] == variable_to_gradient_1[x_1])

        if loss_1 < 1e-6:
            break

    assert loss_1 < 1e-6
    assert loss_2 < 1e-6
    assert loss_3 < 1e-6
Example #17
def test_variable_less_than_equal_or_equal_to():

    # 1-D Array Case

    other_var = Variable(np.array([11, 22, 33, 44, 55]))
    for value in (
            np.arange(5),
            np.array([11, 22, 33, 44, 55]),
            np.array([0, 22, 0, 44, 0]),
    ):
        var = Variable(value)
        # Variable + Variable
        assert var.less_equal(other_var).all()
        assert var.less_than_equal(other_var).all()
        assert var.le(other_var).all()
        assert var.lte(other_var).all()
        assert (var <= other_var).all()
        assert np.less_equal(var, other_var).all()
        # numpy + numpy
        assert np.less_equal(value.copy(), np.array([11, 22, 33, 44,
                                                     55])).all()
        assert (value.copy() <= np.array([11, 22, 33, 44, 55])).all()
        # Variable + numpy
        assert var.less_equal(np.array([11, 22, 33, 44, 55])).all()
        assert var.less_than_equal(np.array([11, 22, 33, 44, 55])).all()
        assert var.le(np.array([11, 22, 33, 44, 55])).all()
        assert var.lte(np.array([11, 22, 33, 44, 55])).all()
        assert (var <= np.array([11, 22, 33, 44, 55])).all()
        assert np.less_equal(var, np.array([11, 22, 33, 44, 55])).all()
        # numpy + Variable
        assert np.less_equal(var, np.array([11, 22, 33, 44, 55])).all()
        assert (var <= np.array([11, 22, 33, 44, 55])).all()

    # 0-D Array Case

    other_var = Variable(np.array(21, dtype=float))
    for value in (9, 21):
        var = Variable(np.array(value))
        # Variable + Variable
        assert var.less_equal(other_var)
        assert var.less_than_equal(other_var)
        assert var.le(other_var)
        assert var.lte(other_var)
        assert var <= other_var
        assert np.less_equal(var, other_var)
        # numpy + numpy
        assert np.less_equal(np.array(value), np.array(21))
        assert np.all(np.array(value) <= np.array(21))
        # Variable + numpy
        assert var.less_equal(np.array(21)).all()
        assert var.less_than_equal(np.array(21)).all()
        assert var.le(np.array(21)).all()
        assert var.lte(np.array(21)).all()
        assert (var <= np.array(21)).all()
        assert np.less_equal(var, np.array(21, dtype=float)).all()
        # numpy + Variable
        assert np.less_equal(var, np.array(value, dtype=float)).all()
        assert (var <= np.array(value)).all()

    # Python Int Case

    other_var = Variable(84)
    for value in (37, 84):
        var = Variable(value)
        # Variable + Variable
        assert var.less_equal(other_var)
        assert var.less_than_equal(other_var)
        assert var.le(other_var)
        assert var.lte(other_var)
        assert var <= other_var
        assert np.less_equal(var, other_var)
        # Python + Python
        assert np.less_equal(np.array(value), np.array(84))
        assert np.all(np.array(value) <= np.array(84))
        # Variable + Python
        assert var.less_equal(np.array(84)).all()
        assert var.less_than_equal(np.array(84)).all()
        assert var.le(np.array(84)).all()
        assert var.lte(np.array(84)).all()
        assert (var <= np.array(84)).all()
        assert np.less_equal(var, np.array(84, dtype=float)).all()
        # Python + Variable
        assert np.less_equal(var, np.array(84, dtype=float)).all()
        assert (var <= np.array(84)).all()
Example #18
def test_variable_greater_than_equal_or_equal_to():

    # 1-D Array Case

    var = Variable(np.array([11, 22, 33, 44, 55]))
    for other_value in (
            np.arange(5),
            np.array([11, 22, 33, 44, 55]),
            np.array([0, 22, 0, 44, 0]),
    ):
        other_var = Variable(other_value)
        # Variable + Variable
        assert var.greater_equal(other_var).all()
        assert var.greater_than_equal(other_var).all()
        assert var.ge(other_var).all()
        assert var.gte(other_var).all()
        assert (var >= other_var).all()
        assert np.greater_equal(var, other_var).all()
        # numpy + numpy
        assert np.greater_equal(np.array([11, 22, 33, 44, 55]),
                                other_value.copy()).all()
        assert (np.array([11, 22, 33, 44, 55]) >= other_value.copy()).all()
        # Variable + numpy
        assert var.greater_equal(other_value.copy()).all()
        assert var.greater_than_equal(other_value.copy()).all()
        assert var.ge(other_value.copy()).all()
        assert var.gte(other_value.copy()).all()
        assert (var >= other_value.copy()).all()
        assert np.greater_equal(var, other_value.copy()).all()
        # numpy + Variable
        assert np.greater_equal(var, other_value.copy()).all()
        assert (var >= other_value.copy()).all()

    # 0-D Array Case

    var = Variable(np.array(21, dtype=float))
    for other_value in (9, 21):
        other_var = Variable(np.array(other_value))
        # Variable + Variable
        assert var.greater_equal(other_var)
        assert var.greater_than_equal(other_var)
        assert var.ge(other_var)
        assert var.gte(other_var)
        assert var >= other_var
        assert np.greater_equal(var, other_var)
        # numpy + numpy
        assert np.greater_equal(np.array(21), np.array(other_value))
        assert np.all(np.array(21) >= np.array(other_value))
        # Variable + numpy
        assert var.greater_equal(np.array(other_value)).all()
        assert var.greater_than_equal(np.array(other_value)).all()
        assert var.ge(np.array(other_value)).all()
        assert var.gte(np.array(other_value)).all()
        assert (var >= np.array(other_value)).all()
        assert np.greater_equal(var, np.array(other_value, dtype=float)).all()
        # numpy + Variable
        assert np.greater_equal(var, np.array(other_value, dtype=float)).all()
        assert (var >= np.array(other_value)).all()

    # Python Int Case

    var = Variable(84)
    for other_value in (37, 84):
        other_var = Variable(other_value)
        # Variable + Variable
        assert var.greater_equal(other_var)
        assert var.greater_than_equal(other_var)
        assert var.ge(other_var)
        assert var.gte(other_var)
        assert var >= other_var
        assert np.greater_equal(var, other_var)
        # Python + Python
        assert np.greater_equal(np.array(84), np.array(other_value))
        assert np.all(np.array(84) >= np.array(other_value))
        # Variable + Python
        assert var.greater_equal(np.array(other_value)).all()
        assert var.greater_than_equal(np.array(other_value)).all()
        assert var.ge(np.array(other_value)).all()
        assert var.gte(np.array(other_value)).all()
        assert (var >= np.array(other_value)).all()
        assert np.greater_equal(var, np.array(other_value, dtype=float)).all()
        # Python + Variable
        assert np.greater_equal(var, np.array(other_value, dtype=float)).all()
        assert (var >= np.array(other_value)).all()
Example #19
def test_variable_less():

    # 1-D Array Case

    var = Variable(np.arange(5))
    other_var = Variable(np.array([11, 22, 33, 44, 55]))
    # Variable + Variable
    assert var.less(other_var).all()
    assert var.less_than(other_var).all()
    assert var.lt(other_var).all()
    assert (var < other_var).all()
    assert np.less(var, other_var).all()
    # numpy + numpy
    assert np.less(np.arange(5), np.array([11, 22, 33, 44, 55])).all()
    assert (np.arange(5) < np.array([11, 22, 33, 44, 55])).all()
    # Variable + numpy
    assert var.less(np.array([11, 22, 33, 44, 55])).all()
    assert var.less_than(np.array([11, 22, 33, 44, 55])).all()
    assert var.lt(np.array([11, 22, 33, 44, 55])).all()
    assert (var < np.array([11, 22, 33, 44, 55])).all()
    assert np.less(var, np.array([11, 22, 33, 44, 55])).all()
    # numpy + Variable
    assert np.less(var, np.array([11, 22, 33, 44, 55])).all()
    assert (var < np.array([11, 22, 33, 44, 55])).all()

    # 0-D Array Case

    var = Variable(np.array(9))
    other_var = Variable(np.array(21, dtype=float))
    # Variable + Variable
    assert var.less(other_var)
    assert var.less_than(other_var)
    assert var.lt(other_var)
    assert var < other_var
    assert np.less(var, other_var)
    # numpy + numpy
    assert np.less(np.array(9), np.array(21))
    assert np.all(np.array(9) < np.array(21))
    # Variable + numpy
    assert var.less(np.array(21)).all()
    assert var.less_than(np.array(21)).all()
    assert var.lt(np.array(21)).all()
    assert (var < np.array(21)).all()
    assert np.less(var, np.array(21, dtype=float)).all()
    # numpy + Variable
    assert np.less(var, np.array(21, dtype=float)).all()
    assert (var < np.array(21)).all()

    # Python Int Case

    var = Variable(37)
    other_var = Variable(84)
    # Variable + Variable
    assert var.less(other_var)
    assert var.less_than(other_var)
    assert var.lt(other_var)
    assert var < other_var
    assert np.less(var, other_var)
    # Python + Python
    assert np.less(np.array(37), np.array(84))
    assert np.all(np.array(37) < np.array(84))
    # Variable + Python
    assert var.less(np.array(84)).all()
    assert var.less_than(np.array(84)).all()
    assert var.lt(np.array(84)).all()
    assert (var < np.array(84)).all()
    assert np.less(var, np.array(84, dtype=float)).all()
    # Python + Variable
    assert np.less(var, np.array(84, dtype=float)).all()
    assert (var < np.array(84)).all()
Example #20
def test_variable_greater():

    # 1-D Array Case

    var = Variable(np.array([11, 22, 33, 44, 55]))
    other_var = Variable(np.arange(5))
    # Variable + Variable
    assert var.greater(other_var).all()
    assert var.greater_than(other_var).all()
    assert var.gt(other_var).all()
    assert (var > other_var).all()
    assert np.greater(var, other_var).all()
    # numpy + numpy
    assert np.greater(np.array([11, 22, 33, 44, 55]), np.arange(5)).all()
    assert (np.array([11, 22, 33, 44, 55]) > np.arange(5)).all()
    # Variable + numpy
    assert var.greater(np.arange(5)).all()
    assert var.greater_than(np.arange(5)).all()
    assert var.gt(np.arange(5)).all()
    assert (var > np.arange(5)).all()
    assert np.greater(var, np.arange(5, dtype=float)).all()
    # numpy + Variable
    assert np.greater(var, np.arange(5, dtype=float)).all()
    assert (var > np.arange(5)).all()

    # 0-D Array Case

    var = Variable(np.array(21, dtype=float))
    other_var = Variable(np.array(9))
    # Variable + Variable
    assert var.greater(other_var)
    assert var.greater_than(other_var)
    assert var.gt(other_var)
    assert var > other_var
    assert np.greater(var, other_var)
    # numpy + numpy
    assert np.greater(np.array(21), np.array(9))
    assert np.all(np.array(21) > np.array(9))
    # Variable + numpy
    assert var.greater(np.array(9)).all()
    assert var.greater_than(np.array(9)).all()
    assert var.gt(np.array(9)).all()
    assert (var > np.array(9)).all()
    assert np.greater(var, np.array(9, dtype=float)).all()
    # numpy + Variable
    assert np.greater(var, np.array(9, dtype=float)).all()
    assert (var > np.array(9)).all()

    # Python Int Case

    var = Variable(84)
    other_var = Variable(37)
    # Variable + Variable
    assert var.greater(other_var)
    assert var.greater_than(other_var)
    assert var.gt(other_var)
    assert var > other_var
    assert np.greater(var, other_var)
    # Python + Python
    assert np.greater(np.array(84), np.array(37))
    assert np.all(np.array(84) > np.array(37))
    # Variable + Python
    assert var.greater(np.array(37)).all()
    assert var.greater_than(np.array(37)).all()
    assert var.gt(np.array(37)).all()
    assert (var > np.array(37)).all()
    assert np.greater(var, np.array(37, dtype=float)).all()
    # Python + Variable
    assert np.greater(var, np.array(37, dtype=float)).all()
    assert (var > np.array(37)).all()
Example #21
def test_variable_not_equal():

    # 1-D Array Case

    var = Variable(np.array([11, 22, 33, 44, 55]))
    other_var = Variable(np.arange(5))
    # Variable + Variable
    assert var.not_equal(other_var).all()
    assert var.neq(other_var).all()
    assert var.ne(other_var).all()
    assert (var != other_var).all()
    assert np.not_equal(var, other_var).all()
    # numpy + numpy
    assert np.not_equal(np.arange(5), np.array([11, 22, 33, 44, 55])).all()
    assert (np.arange(5) != np.array([11, 22, 33, 44, 55])).all()
    # Variable + numpy
    assert var.not_equal(np.arange(5)).all()
    assert var.neq(np.arange(5)).all()
    assert var.ne(np.arange(5)).all()
    assert (var != np.arange(5)).all()
    assert np.not_equal(var, np.arange(5, dtype=float)).all()
    # numpy + Variable
    assert np.not_equal(np.arange(5, dtype=float), var).all()
    assert (np.arange(5) != var).all()

    # 0-D Array Case

    var = Variable(np.array(21, dtype=float))
    other_var = Variable(np.array(9))
    # Variable + Variable
    assert var.not_equal(other_var)
    assert var.neq(other_var)
    assert var.ne(other_var)
    assert var != other_var
    assert np.not_equal(var, other_var)
    # numpy + numpy
    assert np.not_equal(np.array(9), np.array(21))
    assert np.all(np.array(9) != np.array(21))
    # Variable + numpy
    assert var.not_equal(np.array(9)).all()
    assert var.neq(np.array(9)).all()
    assert var.ne(np.array(9)).all()
    assert (var != np.array(9)).all()
    assert np.not_equal(var, np.array(9, dtype=float)).all()
    # numpy + Variable
    assert np.not_equal(np.array(9, dtype=float), var).all()
    assert (np.array(9) != var).all()

    # Python Int Case

    var = Variable(37)
    other_var = Variable(84)
    # Variable + Variable
    assert var.not_equal(other_var)
    assert var.neq(other_var)
    assert var.ne(other_var)
    assert var != other_var
    assert np.not_equal(var, other_var)
    # Python + Python
    assert np.not_equal(np.array(37), np.array(84))
    assert np.all(np.array(37) != np.array(84))
    # Variable + Python
    assert var.not_equal(np.array(84)).all()
    assert var.neq(np.array(84)).all()
    assert var.ne(np.array(84)).all()
    assert (var != np.array(84)).all()
    assert np.not_equal(var, np.array(84, dtype=float)).all()
    # Python + Variable
    assert np.not_equal(np.array(84, dtype=float), var).all()
    assert (np.array(84) != var).all()
Example #22
def test_variable_equal():

    # 1-D Array Case

    var = Variable(np.arange(5))
    other_var = Variable(np.arange(5))
    # Variable + Variable
    assert var.equal(other_var).all()
    assert var.eq(other_var).all()
    assert (var == other_var).all()
    assert np.equal(var, other_var).all()
    # numpy + numpy
    assert np.equal(np.arange(5), np.arange(5)).all()
    assert (np.arange(5) == np.arange(5)).all()
    # Variable + numpy
    assert var.equal(np.arange(5)).all()
    assert var.eq(np.arange(5)).all()
    assert (var == np.arange(5)).all()
    assert np.equal(var, np.arange(5, dtype=float)).all()
    # numpy + Variable
    assert np.equal(np.arange(5, dtype=float), var).all()
    assert (np.arange(5) == var).all()

    # 0-D Array Case

    var = Variable(np.array(9))
    other_var = Variable(np.array(9))
    # Variable + Variable
    assert var.equal(other_var)
    assert var.eq(other_var)
    assert var == other_var
    assert np.equal(var, other_var)
    # numpy + numpy
    assert np.equal(np.array(9), np.array(9))
    assert np.all(np.array(9) == np.array(9))
    # Variable + numpy
    assert var.equal(np.array(9)).all()
    assert var.eq(np.array(9)).all()
    assert (var == np.array(9)).all()
    assert np.equal(var, np.array(9, dtype=float)).all()
    # numpy + Variable
    assert np.equal(np.array(9, dtype=float), var).all()
    assert (np.array(9) == var).all()

    # Python Int Case

    var = Variable(7)
    other_var = Variable(7)
    # Variable + Variable
    assert var.equal(other_var)
    assert var.eq(other_var)
    assert var == other_var
    assert np.equal(var, other_var)
    # Python + Python
    assert np.equal(np.array(7), np.array(7))
    assert np.all(np.array(7) == np.array(7))
    # Variable + Python
    assert var.equal(np.array(7)).all()
    assert var.eq(np.array(7)).all()
    assert (var == np.array(7)).all()
    assert np.equal(var, np.array(7, dtype=float)).all()
    # Python + Variable
    assert np.equal(np.array(7, dtype=float), var).all()
    assert (np.array(7) == var).all()
Example #23
    # Verify Trainability (Base)
    x = Variable(np.random.rand(2))
    y = np.array([100, 8])
    sgd = autograd.optimizer.SGD(learning_rate=1e-4)
    for training_step_index in range(10_000):
        y_hat = x.pow(np.array([2, 3]))
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if training_step_index > 10 and loss.sum() < 1e-4:
            break
        sgd.take_training_step(loss)
    assert np.abs(x - np.array([10, 2])).sum() < 3e-3
    assert loss.sum() < 1e-4

    # Verify Trainability (Exponent)
    x = Variable(np.array([1.9, 2.9], dtype=float))
    y = np.array([9, 8])
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    for training_step_index in range(1_000):
        y_hat = np.float_power(np.array([3, 2], dtype=float), x)
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if training_step_index > 10 and loss.sum() < 1e-4:
            break
        sgd.take_training_step(loss)
    assert np.abs(x - np.array([2, 3])).sum() < 9e-3
    assert loss.sum() < 1e-4


Example #24
def test_variable_pow():
    a_array = np.arange(5, dtype=float) + 1
    b_array = np.array([0, 2, 2, 3, 3], dtype=float)
    a = Variable(np.arange(5, dtype=float) + 1)
    b = Variable(np.array([0, 2, 2, 3, 3], dtype=float))
    expected_result_variable = Variable(np.array([1, 4, 9, 64, 125]))
    expected_result_array = np.array([1, 4, 9, 64, 125])

    assert np.all(a_array == a.data)
    assert np.all(b_array == b.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(b_array) != id(b.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.all(result == expected_result_array)
        assert isinstance(result, np.ndarray)
        return

    # Variable + Variable
    validate_variable_result(a.power(b))
    validate_variable_result(a.pow(b))
    validate_variable_result(a**b)
    validate_variable_result(np.float_power(a, b))

    # numpy + numpy
    validate_array_result(np.float_power(a_array, b_array))
    validate_array_result(a_array**b_array)

    # Variable + numpy
    validate_variable_result(a.power(b_array))
    validate_variable_result(a**b_array)
    validate_variable_result(np.float_power(a, b_array))

    # numpy + Variable
    validate_variable_result(np.float_power(a_array, b))
    # validate_variable_result(a_array ** b)

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    result = a**b
    variable_to_gradient = sgd.take_training_step(result)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == b_array *
                  (a_array**(b_array - 1)))
    assert np.all(variable_to_gradient[b] == np.log(a_array) *
                  (a_array**b_array))

    # Verify Trainability (Base)
    x = Variable(np.random.rand(2))
    y = np.array([100, 8])
    sgd = autograd.optimizer.SGD(learning_rate=1e-4)
    for training_step_index in range(10_000):
        y_hat = x.pow(np.array([2, 3]))
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if training_step_index > 10 and loss.sum() < 1e-4:
            break
        sgd.take_training_step(loss)
Example #25
def test_variable_add():
    a_array = np.arange(5)
    b_array = np.array([3, 8, 5, 6, 8])
    a = Variable(np.arange(5, dtype=float))
    b = Variable(np.array([3, 8, 5, 6, 8], dtype=float))
    expected_result_variable = Variable(np.array([3, 9, 7, 9, 12]))
    expected_result_array = np.array([3, 9, 7, 9, 12])

    assert np.all(a_array == a.data)
    assert np.all(b_array == b.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(b_array) != id(b.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.all(result == expected_result_array)
        assert isinstance(result, np.ndarray)
        return

    # Variable + Variable
    validate_variable_result(a.add(b))
    validate_variable_result(a + b)
    validate_variable_result(np.add(a, b))

    # numpy + numpy
    validate_array_result(np.add(a_array, b_array))
    validate_array_result(a_array + b_array)

    # Variable + numpy
    validate_variable_result(a.add(b_array))
    validate_variable_result(a + b_array)
    validate_variable_result(np.add(a, b_array))

    # numpy + Variable
    validate_variable_result(np.add(a_array, b))
    # validate_variable_result(a_array + b)

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    summation = a + b
    variable_to_gradient = sgd.take_training_step(summation)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == np.ones(a.shape))
    assert np.all(variable_to_gradient[b] == np.ones(b.shape))

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = np.array([10, 10])
    sgd = autograd.optimizer.SGD(learning_rate=1e-1)
    for training_step_index in range(1_000):
        y_hat = x.add(np.array([-10, 50]))
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if training_step_index > 10 and loss.sum() < 1e-10:
            break
        sgd.take_training_step(loss)
Example #26
def test_variable_multiply():
    a_array = np.arange(5)
    b_array = np.array([3, 8, 5, 6, 8])
    a = Variable(np.arange(5, dtype=float))
    b = Variable(np.array([3, 8, 5, 6, 8], dtype=float))
    expected_result_variable = Variable(np.array([0, 8, 10, 18, 32]))
    expected_result_array = np.array([0, 8, 10, 18, 32])

    assert np.all(a_array == a.data)
    assert np.all(b_array == b.data)
    assert np.all(expected_result_variable == expected_result_array)

    assert id(a_array) != id(a.data)
    assert id(b_array) != id(b.data)
    assert id(expected_result_variable) != id(expected_result_array)

    def validate_variable_result(result) -> None:
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.all(result == expected_result_array)
        assert isinstance(result, np.ndarray)
        return

    # Variable + Variable
    validate_variable_result(a.multiply(b))
    validate_variable_result(a * b)
    validate_variable_result(np.multiply(a, b))

    # numpy + numpy
    validate_array_result(np.multiply(a_array, b_array))
    validate_array_result(a_array * b_array)

    # Variable + numpy
    validate_variable_result(a.multiply(b_array))
    validate_variable_result(a * b_array)
    validate_variable_result(np.multiply(a, b_array))

    # numpy + Variable
    validate_variable_result(np.multiply(a_array, b))
    # validate_variable_result(a_array * b)

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    product = a * b
    variable_to_gradient = sgd.take_training_step(product)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a] == b_array)
    assert np.all(variable_to_gradient[b] == a_array)

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = 0
    sgd = autograd.optimizer.SGD(learning_rate=1e-4)
    for training_step_index in range(1_000):
        y_hat = x.multiply(np.array([-10, 50]))
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if np.all(loss < 1e-3):
            break
        sgd.take_training_step(loss)
Example #27
def test_variable_expand_dims():
    a_array = np.arange(5)
    a = Variable(np.arange(5, dtype=float))
    expected_result_variable = Variable(np.array([[0, 1, 2, 3, 4]]))
    expected_result_number = np.array([[0, 1, 2, 3, 4]])

    assert np.all(a_array == a.data)
    assert np.all(expected_result_variable == expected_result_number)

    assert id(a_array) != id(a.data)
    assert id(expected_result_variable) != id(expected_result_number)

    def validate_variable_result(result) -> None:
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)
        return

    def validate_array_result(result) -> None:
        assert np.all(result == expected_result_number)
        assert isinstance(result, np.ndarray)
        return

    # Variable
    validate_variable_result(a.expand_dims(0))
    validate_variable_result(a.expand_dims((0, )))
    validate_variable_result(np.expand_dims(a, 0))
    validate_variable_result(np.expand_dims(a, (0, )))

    # numpy
    validate_array_result(np.expand_dims(a_array, 0))
    validate_array_result(np.expand_dims(a_array, (0, )))

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    a_expanded = a.expand_dims(0)
    diff = a_expanded - np.zeros(5)
    loss = np.sum(diff**2)
    variable_to_gradient = sgd.take_training_step(loss)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert a_expanded.data.base is not a.data
    assert np.all(variable_to_gradient[a] == np.arange(5) * 2)
    assert tuple(variable_to_gradient[a].shape) == (5, )
    assert np.all(variable_to_gradient[a_expanded] == np.arange(5) * 2)
    assert tuple(variable_to_gradient[a_expanded].shape) == (1, 5)
    assert np.all(variable_to_gradient[a_expanded].squeeze(0) ==
                  variable_to_gradient[a_expanded])
    assert np.all(variable_to_gradient[a_expanded].squeeze() ==
                  variable_to_gradient[a_expanded])
    assert np.all(a.data == a_expanded.data)

    # Verify Trainability
    x = Variable(np.random.rand(2))
    y = np.array([[10], [30]])
    sgd = autograd.optimizer.SGD(learning_rate=1e-1)
    for training_step_index in range(1_000):
        y_hat = x.expand_dims(1)
        assert y_hat.data.base is not x.data
        diff = np.subtract(y, y_hat)
        loss = np.sum(diff**2)
        if training_step_index > 10 and loss.sum() < 1e-10:
            break
        sgd.take_training_step(loss)
Example #28
def test_variable_isclose():

    # 1-D Array Case

    value = np.array([0, 1e-16])
    other_value = np.zeros(2)
    var = Variable(np.array([0, 1e-16]))
    other_var = Variable(np.zeros(2))
    # Variable + Variable
    assert var.isclose(other_var).all()
    assert np.isclose(var, other_var).all()
    # numpy + numpy
    assert np.isclose(value, other_value).all()
    # Variable + numpy
    assert var.isclose(other_value).all()
    assert np.isclose(var, other_value).all()
    # numpy + Variable
    assert np.isclose(other_value, var).all()

    # 0-D Array Case

    value = np.array(1e-10)
    other_value = np.array(1e-16)
    var = Variable(1e-10)
    other_var = Variable(1e-16)
    # Variable + Variable
    assert var.isclose(other_var)
    assert np.isclose(var, other_var)
    # numpy + numpy
    assert np.isclose(value, other_value)
    # Variable + numpy
    assert var.isclose(other_value).all()
    assert np.isclose(var, other_value).all()
    # numpy + Variable
    assert np.isclose(other_value, var).all()

    # Python Float Case

    value = 1e-10
    other_value = 1e-16
    var = Variable(1e-10)
    other_var = Variable(1e-16)
    # Variable + Variable
    assert var.isclose(other_var)
    assert np.isclose(var, other_var)
    # numpy + Python
    assert np.isclose(value, other_value)
    # Variable + Python
    assert var.isclose(other_value).all()
    assert np.isclose(var, other_value).all()
    # Python + Variable
    assert np.isclose(other_value, var).all()
Example #29
def test_variable_all_any():
    for var in (
            Variable(np.arange(100)),
            Variable(np.arange(100).reshape(2, 50)),
            Variable(np.arange(100).reshape(2, 5, 10)),
    ):
        for operand in (var, var.data):
            assert not operand.all()
            assert operand.any()
            assert not np.all(operand)
            assert np.any(operand)

    for var in (
            Variable(np.array(1)),
            Variable(np.ones(100)),
            Variable(np.ones(100).reshape(2, 50)),
            Variable(np.ones(100).reshape(2, 5, 10)),
    ):
        for operand in (var, var.data):
            assert operand.all()
            assert operand.any()
            assert np.all(operand)
            assert np.any(operand)

    for var in (
            Variable(np.array(0)),
            Variable(np.zeros(100)),
            Variable(np.zeros(100).reshape(2, 50)),
            Variable(np.zeros(100).reshape(2, 5, 10)),
    ):
        for operand in (var, var.data):
            assert not operand.all()
            assert not operand.any()
            assert not np.all(operand)
            assert not np.any(operand)

    assert not Variable(0).all()
    assert not Variable(0).any()
    assert Variable(1).all()
    assert Variable(1).any()

    assert not np.all(0)
    assert not np.any(0)
    assert np.all(3)
    assert np.any(4)
Example #30
def test_variable_matmul():
    a_matrix = np.arange(10, dtype=float).reshape(2, 5)
    b_matrix = np.arange(10, dtype=float).reshape(2, 5).T
    a = Variable(np.array([
        [0, 1, 2, 3, 4],
        [5, 6, 7, 8, 9],
    ], dtype=float))
    b = Variable(
        np.array([
            [0, 5],
            [1, 6],
            [2, 7],
            [3, 8],
            [4, 9],
        ], dtype=float))
    expected_result_variable = Variable(
        np.array([
            [30, 80],
            [80, 255],
        ], dtype=float))
    expected_result_matrix = np.array([
        [30, 80],
        [80, 255],
    ], dtype=float)

    assert np.all(a_matrix == a.data)
    assert np.all(b_matrix == b.data)
    assert np.all(expected_result_variable == expected_result_matrix)

    assert id(a_matrix) != id(a.data)
    assert id(b_matrix) != id(b.data)
    assert id(expected_result_variable) != id(expected_result_matrix)

    def validate_variable_result(result) -> None:
        assert tuple(result.shape) == (2, 2)
        assert result.eq(expected_result_variable).all()
        assert isinstance(result, Variable)
        return

    def validate_matrix_result(result) -> None:
        assert tuple(result.shape) == (2, 2)
        assert np.all(result == expected_result_matrix)
        assert isinstance(result, np.ndarray)
        return

    # Variable + Variable
    validate_variable_result(a.matmul(b))
    validate_variable_result(a @ b)
    validate_variable_result(np.matmul(a, b))

    # numpy + numpy
    validate_matrix_result(np.matmul(a_matrix, b_matrix))
    validate_matrix_result(a_matrix @ b_matrix)

    # Variable + numpy
    validate_variable_result(a.matmul(b_matrix))
    validate_variable_result(a @ b_matrix)
    validate_variable_result(np.matmul(a, b_matrix))

    # numpy + Variable
    validate_variable_result(np.matmul(a_matrix, b))
    # validate_variable_result(a_matrix @ b)

    # Verify Derivative
    sgd = autograd.optimizer.SGD(learning_rate=1e-3)
    matrix_product = a @ b
    variable_to_gradient = sgd.take_training_step(matrix_product)
    assert all(
        isinstance(var, Variable) and isinstance(grad, np.ndarray)
        for var, grad in variable_to_gradient.items())
    assert np.all(variable_to_gradient[a].shape == a.shape)
    assert np.all(variable_to_gradient[b].shape == b.shape)
    assert np.all(variable_to_gradient[a] == b_matrix.T)
    assert np.all(variable_to_gradient[b] == a_matrix.T)

    # Verify Trainability
    x = Variable(np.array([[1.1, 1.9], [2.9, 4.1]]))
    y = np.array([[7, 10], [15, 22]])
    sgd = autograd.optimizer.SGD(learning_rate=1e-2)
    for training_step_index in range(1_000):
        y_hat = x.matmul(np.array([[1, 2], [3, 4]]))
        diff = np.subtract(y, y_hat)
        loss = diff**2
        if training_step_index > 10 and loss.sum() < 1e-10:
            break
        sgd.take_training_step(loss)
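
For reference, the general identities behind matmul gradients: for C = A @ B with an upstream gradient G of C's shape, dL/dA = G @ B.T and dL/dB = A.T @ G. How the library seeds G when take_training_step is called directly on the product is its own convention; the shape bookkeeping, though, is plain numpy:

A = np.arange(6, dtype=float).reshape(2, 3)
B = np.arange(12, dtype=float).reshape(3, 4)
G = np.ones((2, 4))                # stand-in upstream gradient
dA, dB = G @ B.T, A.T @ G
assert dA.shape == A.shape and dB.shape == B.shape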