# Standard-library and third-party imports assumed by these tests.
# Project-local names (TYPE_FLOAT, TYPE_INT, TYPE_TENSOR, NUM_MAX_TEST_TIMES,
# NUM_MAX_BATCH_SIZE, NUM_MAX_NODES, NUM_MAX_FEATURES,
# GRADIENT_DIFF_ACCEPTANCE_VALUE, random_string, _instance, _instantiate,
# _generate_X, _must_succeed, _validate_storage_allocation,
# _validate_layer_values, _validate_layer_running_statistics, weights,
# standardize, batchnorm_forward, batchnorm_backward, layer, and the layer
# classes) are assumed to come from the surrounding test package.
import copy
import cProfile
import logging
import os
import pathlib
from typing import Union

import numpy as np


def test_020_std_function_method_to_fail():
    """
    Objective:
        Verify the _layer class instance methods reject invalid inputs.
    Expected:
        Layer methods fail with invalid inputs.
    """
    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))

        # For a layer that works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        M: int = np.random.randint(2, NUM_MAX_NODES)
        momentum = TYPE_FLOAT(0.85)

        try:
            _layer = _instance(name=name, num_nodes=M, momentum=momentum)
            _layer.function(int(1))
            raise RuntimeError("Invoke _layer.function(int(1)) must fail.")
        except AssertionError:
            pass

        try:
            _layer = _instance(name=name, num_nodes=M, momentum=momentum)
            _layer.gradient(int(1))
            raise RuntimeError("Invoke _layer.gradient(int(1)) must fail.")
        except AssertionError:
            pass
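
# Note: each try/raise/except block above (and the many like it in this file)
# is equivalent to the pytest idiom, assuming pytest is available:
#
#     with pytest.raises(AssertionError):
#         _layer.function(int(1))

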
def test_020_fss_method_function_multi_invocations_to_succeed():
    """
    Objective:
        Verify the layer class instance function method
    Expected:
        Layer method calculates expected values.
    """
    def objective(X: np.ndarray) -> Union[float, np.ndarray]:
        """Dummy objective function"""
        return np.sum(X, dtype=TYPE_FLOAT)

    profiler = cProfile.Profile()
    profiler.enable()
    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))

        # For BN, which works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)

        # ********************************************************************************
        # Constraint:
        #   layer needs to reallocate X-related storage when X.shape[0] changes.
        # ********************************************************************************
        _layer = _instance(name=name, num_nodes=M, log_level=logging.DEBUG)
        _layer.objective = objective

        for _ in range(np.random.randint(1, 100)):
            _layer.function(
                X,
                numexpr_enabled=numexpr_enabled,
            )

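        # Draw Z with a batch size different from N to trigger reallocation.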
        while True:
            Z = np.random.randn(np.random.randint(1, NUM_MAX_BATCH_SIZE), M)
            if Z.shape[0] != N:
                Z = Z.astype(TYPE_FLOAT)
                break

        _layer.function(
            Z,
            numexpr_enabled=numexpr_enabled,
        )

        # ********************************************************************************
        # Constraint: reallocated storage should match the shape of Z
        # ********************************************************************************
        _validate_storage_allocation(_layer, Z)

    profiler.disable()
    profiler.print_stats(sort="cumtime")
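

# A minimal sketch (hypothetical helper, not the layer's actual code) of the
# reallocation contract the test above exercises: an X-dependent buffer is
# re-allocated whenever the batch size X.shape[0] changes.
def _reallocate_on_batch_change_sketch(buffer, X):
    """Return a buffer matching X.shape, re-allocating only on shape change."""
    if buffer is None or buffer.shape != X.shape:
        buffer = np.empty(X.shape, dtype=TYPE_FLOAT)
    return buffer

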
def test_020_std_instance_properties_access_to_succeed():
    """
    Objective:
        Verify the _layer class instance has initialized its properties.
    Expected:
        Layer parameter access to succeed
    """

    def objective(X: TYPE_TENSOR):
        """Dummy objective function"""
        return np.sum(X)

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        M: int = np.random.randint(1, NUM_MAX_NODES)
        _layer = Standardization(
            name=name,
            num_nodes=M,
            log_level=logging.DEBUG
        )
        _layer.objective = objective

        assert _layer.name == name
        assert _layer.num_nodes == M

        assert \
            _layer.U.dtype == TYPE_FLOAT and \
            _layer.U.shape == (M,)

        assert \
            _layer.dU.dtype == TYPE_FLOAT and \
            _layer.dU.size == M

        assert \
            _layer.dV.dtype == TYPE_FLOAT and \
            _layer.dV.size == M

        assert \
            _layer.SD.dtype == TYPE_FLOAT and \
            _layer.SD.shape == (M,)

        assert \
            _layer.norm.dtype == TYPE_FLOAT and \
            _layer.norm.shape == (M,)

        assert \
            _layer.RU.dtype == TYPE_FLOAT and \
            _layer.RU.shape == (M,)

        assert \
            _layer.RSD.dtype == TYPE_FLOAT and \
            _layer.RSD.shape == (M,)

        assert _layer.objective == objective


def test_020_fss_method_gradient_descent():
    """
    Objective:
        Verify the gradient descent
    Expected:
        The objective decreases with the gradient descents.
    """
    def objective(X: np.ndarray) -> Union[float, np.ndarray]:
        """Dummy objective function"""
        return np.sum(X)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))

        N: int = np.random.randint(2, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        _layer = _instance(name=name, num_nodes=M, log_level=logging.DEBUG)
        _layer.objective = objective

        u = GRADIENT_DIFF_ACCEPTANCE_VALUE
        for _ in range(np.random.randint(1, 10)):
            dout = np.random.uniform(-1, 1, size=X.shape).astype(TYPE_FLOAT)

            Y = _layer.function(
                X,
                numexpr_enabled=numexpr_enabled,
            )
            # pylint: disable=not-callable
            L = _layer.objective(Y)
            G = _layer.gradient(
                dY=dout,
                numexpr_enabled=numexpr_enabled,
            )
            dGamma, dBeta = _layer.update()

            # ********************************************************************************
            # Constraint: expected gradients match with actual
            # ********************************************************************************
            expected_dGamma = np.sum(dout * _layer.X, axis=0, dtype=TYPE_FLOAT)
            expected_dBeta = np.sum(dout, axis=0, dtype=TYPE_FLOAT)
            assert np.allclose(expected_dGamma, dGamma, atol=u), \
                "Need dGamma\n%s\nbut\n%s\ndiff=\n%s\n" \
                % (expected_dGamma, dGamma, expected_dGamma-dGamma)
            assert np.allclose(expected_dBeta, dBeta, atol=u), \
                "Need dBeta\n%s\nbut\n%s\ndiff=\n%s\n" \
                % (expected_dBeta, dBeta, expected_dBeta-dBeta)
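
# The expectations above follow from the per-feature scale-and-shift
# y = gamma * x + beta: dL/dgamma = sum_n dY[n] * X[n] and dL/dbeta = sum_n dY[n],
# hence the np.sum(..., axis=0) reductions over the batch axis.

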
def test_020_event_context_instance_properties(caplog):
    """
    Objective:
        Verify the event_context instance has initialized its properties.
    Expected:
        Access to the initialized properties succeeds.
    """
    caplog.set_level(logging.DEBUG)
    name = "test_020_event_context_instance_properties"
    msg = "Accessing uninitialized property of the layer must fail."

    profiler = cProfile.Profile()
    profiler.enable()
    for _ in range(NUM_MAX_TEST_TIMES):
        stride: TYPE_INT = TYPE_INT(np.random.randint(1, 100))
        event_size: TYPE_INT = TYPE_INT(np.random.randint(1, 100))
        window_size: TYPE_INT = 2 * stride + event_size

        name = random_string(np.random.randint(1, 10))
        event_context = _must_succeed(name=name,
                                      num_nodes=TYPE_INT(1),
                                      window_size=window_size,
                                      event_size=event_size,
                                      msg=msg)

        # --------------------------------------------------------------------------------
        # To pass
        # --------------------------------------------------------------------------------
        try:
            if not event_context.name == name:
                raise RuntimeError("event_context.name == name should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to name should be allowed as already initialized."
            ) from e

        try:
            if not isinstance(event_context.logger, logging.Logger):
                raise RuntimeError(
                    "isinstance(event_context.logger, logging.Logger) should be true"
                )
        except AssertionError as e:
            raise RuntimeError(
                "Access to logger should be allowed as already initialized."
            ) from e

        assert event_context.window_size == window_size
        assert event_context.event_size == event_size

    profiler.disable()
    profiler.print_stats(sort="cumtime")


def test_020_fss_method_function_to_succeed():
    """
    Objective:
        Verify the layer class instance function method
    Expected:
        Layer method calculates expected values.
    """
    def objective(X: np.ndarray) -> Union[float, np.ndarray]:
        """Dummy objective function"""
        return np.sum(X, dtype=TYPE_FLOAT)

    profiler = cProfile.Profile()
    profiler.enable()
    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))
        numba_enabled = bool(np.random.randint(0, 2))

        # For BN, which works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)

        _layer = FeatureScaleShift(name=name,
                                   num_nodes=M,
                                   log_level=logging.DEBUG)
        _layer.objective = objective
        _layer.function(X)

        # ********************************************************************************
        # Constraint:
        #   _layer.N provides the latest X.shape[0].
        #   X-related arrays should have their storage allocated with X.shape:
        #   * dX
        # ********************************************************************************
        assert _layer.N == X.shape[0]
        assert \
            _layer.dX.dtype == TYPE_FLOAT and \
            _layer.dX.shape == (N, M)

    profiler.disable()
    profiler.print_stats(sort="cumtime")


def test_020_fss_instance_properties_access_to_succeed():
    """
    Objective:
        Verify the layer class instance has initialized its properties.
    Expected:
        Layer parameter access to succeed
    """
    def objective(X: np.ndarray) -> Union[float, np.ndarray]:
        """Dummy objective function"""
        return np.sum(X)

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        M: int = np.random.randint(1, NUM_MAX_NODES)
        _layer = FeatureScaleShift(name=name,
                                   num_nodes=M,
                                   log_level=logging.DEBUG)
        _layer.objective = objective

        assert _layer.name == name
        assert _layer.num_nodes == M

        assert \
            _layer.gamma.dtype == TYPE_FLOAT and \
            _layer.gamma.shape == (M,) and \
            np.all(_layer.gamma == np.ones(M, dtype=TYPE_FLOAT))

        assert \
            _layer.dGamma.dtype == TYPE_FLOAT and \
            _layer.dGamma.shape == (M,)

        assert \
            _layer.beta.dtype == TYPE_FLOAT and \
            _layer.beta.shape == (M,) and \
            np.all(_layer.beta == np.zeros(M, dtype=TYPE_FLOAT))

        assert \
            _layer.dBeta.dtype == TYPE_FLOAT and \
            _layer.dBeta.shape == (M,)

        assert _layer.objective == objective


def test_020_fss_method_predict():
    """
    Objective:
        Verify the prediction function
    Expected:
        With a single invocation, predict produces the same output as function.
    """

    # pylint: disable=not-callable
    def objective(X: np.ndarray) -> Union[float, np.ndarray]:
        """Dummy objective function"""
        return np.sum(X, dtype=TYPE_FLOAT)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))

        # For BN, which works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(2, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)

        _layer = _instance(name=name, num_nodes=M, log_level=logging.DEBUG)
        _layer.objective = objective
        Y = _layer.function(
            X,
            numexpr_enabled=numexpr_enabled,
        )
        # ********************************************************************************
        # Constraint: With only 1 invocation, predict should be the same as Y.
        # ********************************************************************************
        assert np.allclose(Y,
                           _layer.predict(X),
                           atol=TYPE_FLOAT(1e-9),
                           rtol=TYPE_FLOAT(0))


def test_020_std_method_function_multi_invocations_to_succeed():
    """
    Objective:
        Verify the _layer class instance function method
    Expected:
        Layer method calculates expected values.
    """
    def objective(x: TYPE_TENSOR):
        """Dummy objective function"""
        return np.sum(x)

    profiler = cProfile.Profile()
    profiler.enable()
    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))

        # For a layer that works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        momentum = TYPE_FLOAT(np.random.uniform(0.7, 0.99))
        if np.random.uniform() < 0.5:
            eps = TYPE_FLOAT(np.random.uniform(1e-12, 1e-10))
        else:
            eps = TYPE_FLOAT(0.0)

        # ********************************************************************************
        # Constraint:
        #   _layer needs to reallocate X-related storage when X.shape[0] changes.
        # ********************************************************************************
        _layer: Standardization = \
            _instance(name=name, num_nodes=M, momentum=momentum, eps=eps, log_level=logging.DEBUG)
        _layer.objective = objective

        for _ in range(np.random.randint(1, 100)):
            _layer.function(
                X,
                numexpr_enabled=numexpr_enabled,
            )

        total_rows_processed = _layer.total_rows_processed
        ru = _layer.RU
        rsd = _layer.RSD

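        # Draw Z with a batch size different from N to trigger reallocation.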
        while True:
            Z = np.random.randn(np.random.randint(1, NUM_MAX_BATCH_SIZE), M)
            if Z.shape[0] != N:
                Z = Z.astype(TYPE_FLOAT)
                break

        _layer.function(
            Z,
            numexpr_enabled=numexpr_enabled,
        )

        # ********************************************************************************
        # Constraint: Properties of Y, U, Xmd, SD should match those of Z
        # ********************************************************************************
        _validate_storage_allocation(_layer, Z)
        _validate_layer_values(_layer, Z, eps=eps)

        # ********************************************************************************
        # Constraint: Statistics is updated with Z
        # ********************************************************************************
        assert _layer.total_rows_processed == total_rows_processed + Z.shape[0]
        _validate_layer_running_statistics(
            _layer=_layer, previous_ru=ru, previous_rsd=rsd, X=Z, eps=eps
        )

    profiler.disable()
    profiler.print_stats(sort="cumtime")


def test_020_std_method_function_to_succeed():
    """
    Objective:
        Verify the _layer class instance function method
    Expected:
        Layer method calculates expected values.
    """
    def objective(x: TYPE_TENSOR):
        """Dummy objective function"""
        return np.sum(x, dtype=TYPE_FLOAT)

    profiler = cProfile.Profile()
    profiler.enable()
    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))
        numba_enabled = bool(np.random.randint(0, 2))

        # For a layer that works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        momentum = TYPE_FLOAT(np.random.uniform(0.7, 0.99))
        eps = TYPE_FLOAT(np.random.uniform(1e-12, 1e-10)) \
            if np.random.uniform() < 0.5 else TYPE_FLOAT(0)
        _layer: Standardization = \
            _instance(name=name, num_nodes=M, momentum=momentum, eps=eps)
        _layer.objective = objective

        # ********************************************************************************
        # Constraint: total_rows_processed = times_of_invocations * N
        # ********************************************************************************
        assert _layer.total_rows_processed == 0
        ru = _layer.RU
        rsd = _layer.RSD
        _layer.function(
            X,
            numexpr_enabled=numexpr_enabled,
        )
        _validate_layer_values(_layer, X, eps=eps)
        _validate_layer_running_statistics(
            _layer=_layer, previous_ru=ru, previous_rsd=rsd, X=X, eps=eps
        )

        # ********************************************************************************
        # Constraint:
        #   _layer.N provides the latest X.shape[0].
        #   X-related arrays should have their storage allocated with X.shape:
        #   * dX
        #   * dXmd01
        #   * dXmd02
        # ********************************************************************************
        assert _layer.N == X.shape[0]
        assert \
            _layer.dX.dtype == TYPE_FLOAT and \
            _layer.dX.shape == (N, M)

        assert \
            _layer.dXmd01.dtype == TYPE_FLOAT and \
            _layer.dXmd01.shape == (N, M)

        assert \
            _layer.dXmd02.dtype == TYPE_FLOAT and \
            _layer.dXmd02.shape == (N, M)
        assert _layer.total_rows_processed == N

        # ********************************************************************************
        # Constraint: total_rows_processed = times_of_invocations * N
        # ********************************************************************************
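        # One invocation already happened above, so after the (i+1)-th loop
        # iteration below the total is N * (i + 2).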
        for i in range(np.random.randint(1, 100)):
            _layer.function(
                X,
                numexpr_enabled=numexpr_enabled,
            )
            assert _layer.total_rows_processed == TYPE_INT(N * (i + 2))

    profiler.disable()
    profiler.print_stats(sort="cumtime")


def test_020_std_instance_properties_access_to_fail():
    """
    Objective:
        Verify the _layer class validates the parameters have been initialized before accessed.
    Expected:
        Initialization detects the access to the non-initialized parameters and fails.
    """
    msg = "Accessing uninitialized property of the _layer must fail."
    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        M: int = np.random.randint(1, NUM_MAX_NODES)
        _layer = Standardization(
            name=name,
            num_nodes=M,
            log_level=logging.DEBUG
        )

        # --------------------------------------------------------------------------------
        # To pass
        # --------------------------------------------------------------------------------
        try:
            if not _layer.name == name:
                raise RuntimeError("_layer.name == name should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to name should be allowed as already initialized."
            ) from e

        try:
            if not _layer.M == M:
                raise RuntimeError("_layer.M == M should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to M should be allowed as already initialized."
            ) from e

        try:
            if not isinstance(_layer.logger, logging.Logger):
                raise RuntimeError(
                    "isinstance(_layer.logger, logging.Logger) should be true"
                )
        except AssertionError as e:
            raise RuntimeError(
                "Access to logger should be allowed as already initialized."
            ) from e

        # --------------------------------------------------------------------------------
        # To fail
        # --------------------------------------------------------------------------------
        try:
            print(_layer.X)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.N)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            _layer.X = int(1)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.dX)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.Xmd)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.dXmd01)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.dXmd02)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.Y)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            _layer._Y = int(1)
            print(_layer.Y)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.dY)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            _layer._dY = int(1)
            print(_layer.dY)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            # pylint: disable=not-callable
            _layer.objective(np.array(1.0, dtype=TYPE_FLOAT))
            raise RuntimeError(msg)
        except AssertionError:
            pass


def test_020_matmul_save_load():
    """
    Objective:
        Verify the load/save methods.

    Constraints:
        1. Be able to save the layer state.
        2. Be able to load the layer state, and the loaded state is the same as the saved state S.
    """

    name = "test_020_matmul_save_load"

    def objective(X: np.ndarray) -> Union[float, np.ndarray]:
        """Dummy objective function to calculate the loss L"""
        return np.sum(X)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(1, NUM_MAX_NODES)
        D: int = np.random.randint(1, NUM_MAX_FEATURES)

        name = "test_020_matmul_methods"
        matmul = _instantiate(name=name,
                              num_nodes=M,
                              num_features=D,
                              objective=objective)
        X = _generate_X(N, D)
        Y = matmul.function(X)
        matmul.gradient(Y)
        matmul.update()

        backup_S = copy.deepcopy(matmul.S)
        backup_W = copy.deepcopy(matmul.W)

        pathname = os.path.sep + os.path.sep.join(
            ["tmp", name + random_string(12) + ".pkl"])

        # ********************************************************************************
        # Constraint:
        #   load must fail when the saved file has been deleted.
        #   This verifies load() actually reads from the path.
        # ********************************************************************************
        matmul.save(pathname)
        path = pathlib.Path(pathname)
        path.unlink()
        try:
            msg = "load must fail with the saved file being deleted."
            matmul.load(pathname)
            raise AssertionError(msg)
        except RuntimeError:
            pass

        # ********************************************************************************
        # Constraint:
        #   load restores the state saved before
        # ********************************************************************************
        matmul.save(pathname)
        matmul._W = np.zeros(shape=matmul.W.shape, dtype=TYPE_FLOAT)
        matmul.load(pathname)
        assert np.array_equal(backup_W, matmul.W), \
            "expected \n%s\n actual \n%s\n" % (backup_W, matmul.W)

        path = pathlib.Path(pathname)
        path.unlink()

        Y = matmul.function(X)
        matmul.gradient(Y)
        matmul.update()

    profiler.disable()
    profiler.print_stats(sort="cumtime")
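

# A minimal sketch of the save/load contract exercised above, assuming plain
# pickle serialization (an illustration, not the layer's actual implementation).
# load() raises RuntimeError when the file is missing, which is what the
# "file deleted" constraint above relies on.
def _save_sketch(state, pathname):
    import pickle
    with open(pathname, "wb") as f:
        pickle.dump(state, f)


def _load_sketch(pathname):
    import pickle
    if not os.path.isfile(pathname):
        raise RuntimeError("cannot load: %s does not exist" % pathname)
    with open(pathname, "rb") as f:
        return pickle.load(f)

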
def test_020_bn_method_function_validate_with_frederik_kratzert():
    """
    Objective:
        Verify the layer class instance function method calculates expected values
    Expected:
        Layer method calculates expected values.
    """
    def objective(x: np.ndarray):
        """Dummy objective function"""
        return np.sum(x)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))
        numba_enabled = bool(np.random.randint(0, 2))

        # For BN, which works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.rand(N, M).astype(TYPE_FLOAT)
        momentum = TYPE_FLOAT(np.random.uniform(0.7, 0.99))
        if np.random.uniform() < 0.5:
            eps = TYPE_FLOAT(np.random.uniform(1e-12, 1e-10))
        else:
            eps = TYPE_FLOAT(0.0)

        layer = BatchNormalization(name=name,
                                   num_nodes=M,
                                   momentum=momentum,
                                   eps=eps,
                                   log_level=logging.DEBUG)
        layer.objective = objective

        u = GRADIENT_DIFF_ACCEPTANCE_VALUE
        out, cache = batchnorm_forward(x=X,
                                       gamma=layer.gamma,
                                       beta=layer.beta,
                                       eps=eps)
        xhat, gamma, xmu, norm, sd, var, eps = cache

        Y = layer.function(X,
                           numexpr_enabled=numexpr_enabled,
                           numba_enabled=numba_enabled)

        # ********************************************************************************
        # Constraint: Y, Xmd, SD, Xstd should match those of frederik_kratzert
        # ********************************************************************************
        assert np.allclose(Y, out, atol=u), \
            "Y=\n%s\nout=\n%s\ndiff=\n%s\n" \
            % (Y, out, (out-Y))

        assert np.allclose(layer.Xmd, xmu, atol=u), \
            "Xmd=\n%s\nxmu=\n%s\ndiff=\n%s\n" \
            % (layer.Xmd, xmu, (xmu-layer.Xmd))

        assert np.allclose(layer.SD, sd, atol=u), \
            "SD=\n%s\nsd=\n%s\ndiff=\n%s\n" \
            % (layer.SD, sd, (sd-layer.SD))

        assert np.allclose(layer.Xstd, xhat, atol=u), \
            "Xstd=\n%s\nxhat=\n%s\ndiff=\n%s\n" \
            % (layer.Xstd, xhat, (xhat-layer.Xstd))

    profiler.disable()
    profiler.print_stats(sort="cumtime")
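

# A reference sketch of the staged batch-normalization forward pass in the
# style of the frederik_kratzert benchmark used above. The intermediate names
# approximate the cache unpacked in the test (xmu, var, sd, xhat); this is an
# illustration under those naming assumptions, not the library implementation.
def _batchnorm_forward_sketch(x, gamma, beta, eps):
    mu = x.mean(axis=0)                 # per-feature mean (U)
    xmu = x - mu                        # deviation from the mean (Xmd)
    var = np.mean(xmu ** 2, axis=0)     # per-feature variance (V)
    sd = np.sqrt(var + eps)             # per-feature standard deviation (SD)
    xhat = xmu / sd                     # standardized input (Xstd)
    return gamma * xhat + beta          # scale and shift (Y)

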
def test_020_bn_instance_properties_access_to_succeed():
    """
    Objective:
        Verify the layer class instance has initialized its properties.
    Expected:
        Layer parameter access to succeed
    """
    def objective(x: np.ndarray):
        """Dummy objective function"""
        return np.sum(x)

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        M: int = np.random.randint(1, NUM_MAX_NODES)
        layer = BatchNormalization(name=name,
                                   num_nodes=M,
                                   log_level=logging.DEBUG)
        layer.objective = objective

        assert layer.name == name
        assert layer.num_nodes == M

        assert \
            layer.gamma.dtype == TYPE_FLOAT and \
            layer.gamma.shape == (M,) and \
            np.all(layer.gamma == np.ones(M, dtype=TYPE_FLOAT))

        assert \
            layer.dGamma.dtype == TYPE_FLOAT and \
            layer.dGamma.shape == (M,)

        assert \
            layer.beta.dtype == TYPE_FLOAT and \
            layer.beta.shape == (M,) and \
            np.all(layer.beta == np.zeros(M, dtype=TYPE_FLOAT))

        assert \
            layer.dBeta.dtype == TYPE_FLOAT and \
            layer.dBeta.shape == (M,)

        assert \
            layer.U.dtype == TYPE_FLOAT and \
            layer.U.shape == (M,)

        assert \
            layer.dU.dtype == TYPE_FLOAT and \
            layer.dU.size == M

        assert \
            layer.dV.dtype == TYPE_FLOAT and \
            layer.dV.size == M

        assert \
            layer.SD.dtype == TYPE_FLOAT and \
            layer.SD.shape == (M,)

        assert \
            layer.norm.dtype == TYPE_FLOAT and \
            layer.norm.shape == (M,)

        assert \
            layer.RU.dtype == TYPE_FLOAT and \
            layer.RU.shape == (M,)

        assert \
            layer.RSD.dtype == TYPE_FLOAT and \
            layer.RSD.shape == (M,)

        assert layer.objective == objective


def test_020_std_method_predict():
    """
    Objective:
        Verify the prediction function
    Expected:
        With a single invocation, predict matches the function output; subsequent
        invocations update the running statistics as expected.
    """
    def objective(x: TYPE_TENSOR):
        """Dummy objective function"""
        return np.sum(x)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))

        # For a layer that works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(2, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        momentum = TYPE_FLOAT(np.random.uniform(0.7, 0.99))
        if np.random.uniform() < 0.5:
            eps = TYPE_FLOAT(np.random.uniform(1e-12, 1e-8))
        else:
            eps = TYPE_FLOAT(0.0)

        _layer: Standardization = \
            _instance(
                name=name,
                num_nodes=M,
                momentum=momentum,
                eps=eps,
                log_level=logging.DEBUG
            )
        _layer.objective = objective
        Y = _layer.function(
            X,
            numexpr_enabled=numexpr_enabled
        )
        # ********************************************************************************
        # Constraint: With only 1 invocation, predict should be the same as Y.
        # RU = momentum * RU + (1 - momentum) * U
        # After the 1st invocation, RU==U. Then momentum * U + (1 - momentum) * U -> U
        # ********************************************************************************
        assert np.allclose(Y, _layer.predict(X), atol=TYPE_FLOAT(1e-9), rtol=TYPE_FLOAT(0))

        # ********************************************************************************
        # Constraint: At the 2nd invocation, the running statistics RU/RSD are
        # updated as the momentum-weighted average with the new batch statistics.
        # ********************************************************************************
        Z = np.random.randn(N, M).astype(TYPE_FLOAT)
        standardized, mean, sd, deviation = standardize(Z, eps=eps, keepdims=False)
        expected_RU = _layer.RU * momentum + mean * (TYPE_FLOAT(1)-momentum)
        expected_RSD = _layer.RSD * momentum + sd * (TYPE_FLOAT(1)-momentum)
        _layer.function(
            Z,
            numexpr_enabled=numexpr_enabled
        )
        assert np.allclose(_layer.RU, expected_RU, atol=TYPE_FLOAT(1e-10), rtol=TYPE_FLOAT(0))
        assert np.allclose(_layer.RSD, expected_RSD, atol=TYPE_FLOAT(1e-10), rtol=TYPE_FLOAT(0))
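

# A minimal sketch of the running-statistics update the tests above verify.
# RU/RSD are exponential moving averages of the per-batch mean U and standard
# deviation SD, which is why RU == U holds after the first invocation:
#     RU  <- momentum * RU  + (1 - momentum) * U
#     RSD <- momentum * RSD + (1 - momentum) * SD
def _ema_update_sketch(running, current, momentum):
    """One exponential-moving-average step (hypothetical helper)."""
    return momentum * running + (TYPE_FLOAT(1) - momentum) * current

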
def test_020_matmul_instance_properties():
    """
    Objective:
        Verify the layer class validates the parameters have been initialized before accessed.
    Expected:
        Initialization detects the access to the non-initialized parameters and fails.
    """
    msg = "Accessing uninitialized property of the layer must fail."

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        M: int = np.random.randint(1, NUM_MAX_NODES)
        D: int = np.random.randint(1, NUM_MAX_FEATURES)
        matmul = Matmul(name=name,
                        num_nodes=M,
                        W=weights.uniform(M, D + 1),
                        log_level=logging.DEBUG)

        # --------------------------------------------------------------------------------
        # To pass
        # --------------------------------------------------------------------------------
        try:
            if not matmul.name == name:
                raise RuntimeError("matmul.name == name should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to name should be allowed as already initialized."
            ) from e

        try:
            if not matmul.M == M:
                raise RuntimeError("matmul.M == M should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to M should be allowed as already initialized.") from e

        try:
            if not isinstance(matmul.logger, logging.Logger):
                raise RuntimeError(
                    "isinstance(matmul.logger, logging.Logger) should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to logger should be allowed as already initialized."
            ) from e

        try:
            _ = matmul.D
        except AssertionError as e:
            raise RuntimeError(
                "Access to D should be allowed as already initialized.") from e

        try:
            _ = matmul.W
        except AssertionError as e:
            raise RuntimeError(
                "Access to W should be allowed as already initialized.") from e

        try:
            _ = matmul.optimizer
        except AssertionError as e:
            raise RuntimeError(
                "Access to optimizer should be allowed as already initialized."
            ) from e

        # --------------------------------------------------------------------------------
        # To fail
        # --------------------------------------------------------------------------------
        try:
            print(matmul.X)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            matmul.X = int(1)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(matmul.dX)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(matmul.dW)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(matmul.Y)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            matmul._Y = int(1)
            print(matmul.Y)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(matmul.dY)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            matmul._dY = int(1)
            print(matmul.dY)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(matmul.T)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            matmul.T = float(1)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            # pylint: disable=not-callable
            matmul.objective(np.array(1.0, dtype=TYPE_FLOAT))
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(matmul.N)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        assert matmul.name == name
        assert matmul.num_nodes == M

        try:
            matmul = Matmul(name=name,
                            num_nodes=M,
                            W=weights.xavier(M, D + 1),
                            log_level=logging.DEBUG)
            matmul.function(int(1))
            raise RuntimeError("Invoke matmul.function(int(1)) must fail.")
        except AssertionError:
            pass

        try:
            matmul = Matmul(name=name,
                            num_nodes=M,
                            W=weights.xavier(M, D + 1),
                            log_level=logging.DEBUG)
            matmul.gradient(int(1))
            raise RuntimeError("Invoke matmul.gradient(int(1)) must fail.")
        except AssertionError:
            pass


def test_020_bn_method_gradient_validate_with_frederik_kratzert():
    """
    Objective:
        Verify the layer class instance gradient method calculates expected values
    Expected:
        Layer method calculates expected values.
    """
    if TYPE_FLOAT == np.float32:
        # TODO: investigate/redesign for 32-bit floating point.
        return

    def objective(x: np.ndarray):
        """Dummy objective function"""
        return np.sum(x)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))
        numba_enabled = bool(np.random.randint(0, 2))

        # For BN, which works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.rand(N, M).astype(TYPE_FLOAT)
        momentum = TYPE_FLOAT(np.random.uniform(0.7, 0.99))
        if np.random.uniform() < 0.5:
            eps = TYPE_FLOAT(np.random.uniform(1e-12, 1e-10))
        else:
            eps = TYPE_FLOAT(0.0)

        _layer = BatchNormalization(name=name,
                                    num_nodes=M,
                                    momentum=momentum,
                                    eps=eps,
                                    log_level=logging.DEBUG)
        _layer.objective = objective

        u = GRADIENT_DIFF_ACCEPTANCE_VALUE
        dout = np.ones(X.shape)

        # --------------------------------------------------------------------------------
        # Benchmark (frederik_kratzert)
        # --------------------------------------------------------------------------------
        out, cache = batchnorm_forward(x=X,
                                       gamma=_layer.gamma,
                                       beta=_layer.beta,
                                       eps=eps)
        xhat, gamma, xmu, norm, sd, var, eps = cache
        dx, dgamma, dbeta, dxhat, dvar, dxmu2, dxmu1, dmu = batchnorm_backward(
            dout, cache)

        # ********************************************************************************
        # Constraint: layer gradients should match those of frederik_kratzert
        # ********************************************************************************
        _layer.function(X,
                        numexpr_enabled=numexpr_enabled,
                        numba_enabled=numba_enabled)
        _layer.gradient(dY=_layer.tensor_cast(dout, TYPE_FLOAT),
                        numexpr_enabled=numexpr_enabled,
                        numba_enabled=numba_enabled)
        assert np.allclose(_layer.dGamma, dgamma, atol=u), \
            "dGamma=\n%s\ndgamma=\n%s\ndiff=\n%s\n" \
            % (_layer.dGamma, dgamma, (dgamma-_layer.dGamma))

        assert np.allclose(_layer.dBeta, dbeta, atol=u), \
            "dBeta=\n%s\ndbeta=\n%s\ndiff=\n%s\n" \
            % (_layer.dBeta, dbeta, (dbeta - _layer.dBeta))

        assert np.allclose(_layer.dXstd, dxhat, atol=u), \
            "dXstd=\n%s\ndxhat=\n%s\ndiff=\n%s\n" \
            % (_layer.dXstd, dxhat, (dxhat - _layer.dXstd))

        assert np.allclose(_layer.dV, dvar, atol=u), \
            "dV=\n%s\ndvar=\n%s\ndiff=\n%s\n" \
            % (_layer.dV, dvar, (dvar - _layer.dV))

        assert np.allclose(_layer.dXmd01, dxmu2, atol=u), \
            "dXmd01=\n%s\ndxmu2=\n%s\ndiff=\n%s\n" \
            % (_layer.dXmd01, dxmu2, (dxmu2 - _layer.dXmd01))

        assert np.allclose(_layer.dXmd02, dxmu1, atol=u), \
            "dXmd02=\n%s\ndxmu1=\n%s\ndiff=\n%s\n" \
            % (_layer.dXmd02, dxmu1, (dxmu1 - _layer.dXmd02))

        assert np.allclose(_layer.dU, dmu, atol=u), \
            "dU=\n%s\ndmu=\n%s\ndiff=\n%s\n" \
            % (_layer.dU, dmu, (dmu - _layer.dU))

        assert np.allclose(_layer.dX, dx, atol=u), \
            "dX=\n%s\ndx=\n%s\ndiff=\n%s\n" \
            % (_layer.dX, dx, (dx - _layer.dX))

    profiler.disable()
    profiler.print_stats(sort="cumtime")
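

# A reference sketch of the staged batch-normalization backward pass following
# the frederik_kratzert decomposition validated above. The intermediates mirror
# the gradients asserted in the test (dxhat ~ dXstd, dvar ~ dV, dxmu1 ~ dXmd02,
# dxmu2 ~ dXmd01, dmu ~ dU); an illustration under those naming assumptions,
# not the library code.
def _batchnorm_backward_sketch(dout, xhat, gamma, xmu, sd, var, eps):
    N = dout.shape[0]
    dbeta = np.sum(dout, axis=0)
    dgamma = np.sum(dout * xhat, axis=0)
    dxhat = dout * gamma
    dxmu1 = dxhat / sd
    dsd = -np.sum(dxhat * xmu, axis=0) / (sd ** 2)
    dvar = 0.5 * dsd / np.sqrt(var + eps)
    dxmu2 = 2.0 * xmu * dvar / N
    dmu = -np.sum(dxmu1 + dxmu2, axis=0)
    dx = dxmu1 + dxmu2 + dmu / N
    return dx, dgamma, dbeta

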
def test_030_objective_instance_properties():
    """
    Objective:
        Verify the layer class validates the parameters have been initialized before accessed.
    Expected:
        Initialization detects the access to the non-initialized parameters and fails.
    """
    msg = "Accessing uninitialized property of the layer must fail."
    name = random_string(np.random.randint(1, 10))
    for _ in range(NUM_MAX_TEST_TIMES):
        M: int = np.random.randint(2, NUM_MAX_NODES)
        _layer = layer.CrossEntropyLogLoss(name=name,
                                           num_nodes=M,
                                           log_level=logging.DEBUG)

        # --------------------------------------------------------------------------------
        # To pass
        # --------------------------------------------------------------------------------
        try:
            if not _layer.name == name:
                raise RuntimeError("layer.name == name should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to name should be allowed as already initialized.") from e

        try:
            if not _layer.M == M:
                raise RuntimeError("layer.M == M should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to M should be allowed as already initialized.") from e

        try:
            if not isinstance(_layer.logger, logging.Logger):
                raise RuntimeError(
                    "isinstance(layer.logger, logging.Logger) should be true")
        except AssertionError as e:
            raise RuntimeError(
                "Access to logger should be allowed as already initialized.") from e

        # --------------------------------------------------------------------------------
        # To fail
        # --------------------------------------------------------------------------------
        try:
            print(_layer.X)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            _layer.X = int(1)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.N)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.dX)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.Y)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            print(_layer.P)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            _layer._Y = int(1)
            print(_layer.Y)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.dY)
            raise RuntimeError(msg)
        except AssertionError:
            pass
        try:
            _layer._dY = int(1)
            print(_layer.dY)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.T)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.L)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            print(_layer.J)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            _layer.T = float(1)
            raise RuntimeError(msg)
        except AssertionError:
            pass

        try:
            _layer.function(int(1))
            raise RuntimeError("Invoke layer.function(int(1)) must fail.")
        except AssertionError:
            pass

        try:
            _layer.function(TYPE_FLOAT(1.0))
            _layer.gradient(int(1))
            raise RuntimeError("Invoke layer.gradient(int(1)) must fail.")
        except AssertionError:
            pass

        del _layer


def test_020_bn_method_gradient_descent():
    """
    Objective:
        Verify the gradient descent
    Expected:
        The objective decreases with the gradient descents.
    """
    if TYPE_FLOAT == np.float32:
        # TODO: investigate/redesign for 32-bit floating point.
        return

    def objective(x: np.ndarray):
        """Dummy objective function"""
        return np.sum(x)

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        name = random_string(np.random.randint(1, 10))
        numexpr_enabled = bool(np.random.randint(0, 2))

        # For BN, which works on per-feature statistics,
        # M = 1 or N = 1 makes no sense.
        N: int = np.random.randint(2, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)

        # Do not use np.random.randn as the test fails for 32-bit float; use np.random.rand.
        X = np.random.rand(N, M).astype(TYPE_FLOAT)
        momentum = TYPE_FLOAT(np.random.uniform(0.7, 0.99))
        if np.random.uniform() < 0.5:
            eps = TYPE_FLOAT(np.random.uniform(1e-12, 1e-10))
        else:
            eps = TYPE_FLOAT(0.0)
        layer = BatchNormalization(name=name,
                                   num_nodes=M,
                                   momentum=momentum,
                                   eps=eps,
                                   log_level=logging.DEBUG)
        layer.objective = objective

        u = GRADIENT_DIFF_ACCEPTANCE_VALUE
        for _ in range(np.random.randint(1, 10)):
            dout = np.random.uniform(-1, 1, size=X.shape).astype(TYPE_FLOAT)

            Y = layer.function(
                X,
                numexpr_enabled=numexpr_enabled,
            )
            # pylint: disable=not-callable
            layer.objective(Y)
            layer.gradient(
                dY=dout,
                numexpr_enabled=numexpr_enabled,
            )
            dGamma, dBeta = layer.update()

            # ********************************************************************************
            # Constraint: expected gradients match with actual
            # ********************************************************************************
            expected_dGamma = np.sum(dout * layer.Xstd, axis=0)
            expected_dBeta = np.sum(dout, axis=0)
            assert np.allclose(expected_dGamma, dGamma, atol=u), \
                "Need dGamma\n%s\nbut\n%s\ndiff=\n%s\n" \
                % (expected_dGamma, dGamma, expected_dGamma-dGamma)
            assert np.allclose(expected_dBeta, dBeta, atol=u), \
                "Need dBeta\n%s\nbut\n%s\ndiff=\n%s\n" \
                % (expected_dBeta, dBeta, expected_dBeta-dBeta)