def test_010_sigmoid_cross_entropy_log_loss_2d(caplog):
    """
    Objective:
        Test case for sigmoid_cross_entropy_log_loss(X, T) =
        -( T * log(sigmoid(X)) + (1-T) * log(1-sigmoid(X)) )

        For the input X of shape (N,1) and T in index format of shape (N,1),
        calculate the sigmoid log loss and verify the values are as expected.

    Expected:
        For  Z = sigmoid(X) = 1 / (1 + exp(-X)) and T=[[1]]
        Then -log(Z) should be almost the same as sigmoid_cross_entropy_log_loss(X, T).
        Almost, because finite floating point precision always introduces rounding errors.
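
        For example, X=[[0.0]] gives Z = sigmoid(0) = 0.5 and loss -log(0.5) ≈ 0.693
        (see Test case 01 below).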
    """
    # caplog.set_level(logging.DEBUG, logger=Logger.name)
    u = REFORMULA_DIFF_ACCEPTANCE_VALUE

    # --------------------------------------------------------------------------------
    # [Test case 01]
    # X:(N,M)=(1, 1). X=(x0) where x0=0, for which sigmoid(X) generates 0.5.
    # Expected:
    #   sigmoid_cross_entropy_log_loss(X, T) == -log(0.5)
    # --------------------------------------------------------------------------------
    X = np.array([[TYPE_FLOAT(0.0)]])
    T = np.array([TYPE_LABEL(1)])
    X, T = transform_X_T(X, T)
    E = -logarithm(np.array([TYPE_FLOAT(0.5)]))

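    # J is the per-sample loss and P is the sigmoid activation sigmoid(X).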
    J, P = sigmoid_cross_entropy_log_loss(X, T)
    assert E.shape == J.shape
    assert np.all(E == J), \
        "Expected (E==J) but \n%s\nE=\n%s\nT=%s\nX=\n%s\nJ=\n%s\n" \
        % (np.abs(E - J), E, T, X, J)
    assert np.all(P == TYPE_FLOAT(0.5))

    # --------------------------------------------------------------------------------
    # [Test case 02]
    # For X:(N,1)
    # --------------------------------------------------------------------------------
    for _ in range(NUM_MAX_TEST_TIMES):
        # X(N, M), and T(N,) in index label format
        N = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M = 1   # always 1 for binary classification 0 or 1.

        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        T = np.random.randint(0, 2, N).astype(TYPE_LABEL)  # binary labels 0 or 1
        X, T = transform_X_T(X, T)
        Logger.debug("T is %s\nX is \n%s\n", T, X)

        # ----------------------------------------------------------------------
        # Expected value EJ for J and Z for P
        # Note:
        #   To handle both index label format and OHE label format in the
        #   Loss layer(s), X and T are transformed into (N,1) shapes in
        #   transform_X_T(X, T) for logistic log loss.
        # DO NOT squeeze Z nor P.
        # ----------------------------------------------------------------------
        Z = sigmoid(X)
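        # Per-sample binary cross entropy; squeeze the last axis so EJ has shape (N,) to match J.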
        EJ = np.squeeze(
            -(T * logarithm(Z) + TYPE_FLOAT(1-T) * logarithm(TYPE_FLOAT(1-Z))),
            axis=-1
        )

        # **********************************************************************
        # Constraint: Actual J should be close to EJ.
        # **********************************************************************
        J, P = sigmoid_cross_entropy_log_loss(X, T)
        assert EJ.shape == J.shape
        assert np.all(np.abs(EJ-J) < u), \
            "Expected abs(EJ-J) < %s but \n%s\nEJ=\n%s\nT=%s\nX=\n%s\nJ=\n%s\n" \
            % (u, np.abs(EJ-J), EJ, T, X, J)
        
        # **********************************************************************
        # Constraint: Actual P should be close to Z.
        # **********************************************************************
        assert np.all(np.abs(Z-P) < u), \
            "EP \n%s\nP\n%s\nEP-P \n%s\n" % (Z, P, Z-P)

        # ----------------------------------------------------------------------
        # L = cross_entropy_log_loss(P, T) should be close to J
        # ----------------------------------------------------------------------
        L = cross_entropy_log_loss(P=Z, T=T, f=logistic_log_loss)
        assert L.shape == J.shape
        assert np.all(np.abs(L-J) < u), \
            "Expected abs(L-J) < %s but \n%s\nL=\n%s\nT=%s\nX=\n%s\nJ=\n%s\n" \
            % (u, np.abs(L-J), L, T, X, J)
def disabled_test_040_softmax_log_loss_2d(caplog):
    """
    TODO: Disabled as numerical_jacobian needs to be redesigned for 32-bit floating point.

    Objective:
        Verify the forward path constraints:
        1. Layer output L/loss is np.sum(softmax_cross_entropy_log_loss) / N.
        2. gradient_numerical() == numerical_jacobian(objective, X).

        Verify the backward path constraints:
        1. Analytical gradient G: gradient() == (P-T)/N
        2. Analytical gradient G is close to GN: gradient_numerical().
    """
    caplog.set_level(logging.DEBUG)

    # --------------------------------------------------------------------------------
    # Instantiate a CrossEntropyLogLoss layer
    # --------------------------------------------------------------------------------
    name = "test_040_softmax_log_loss_2d_ohe"

    profiler = cProfile.Profile()
    profiler.enable()

    for _ in range(NUM_MAX_TEST_TIMES):
        N: int = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M: int = np.random.randint(2, NUM_MAX_NODES)  # number of node > 1
        _layer = layer.CrossEntropyLogLoss(
            name=name,
            num_nodes=M,
            log_loss_function=softmax_cross_entropy_log_loss,
            log_level=logging.DEBUG)

        # ================================================================================
        # Layer forward path
        # ================================================================================
        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        T = np.zeros_like(X, dtype=TYPE_LABEL)  # OHE labels.
        T[np.arange(N), np.random.randint(0, M, N)] = int(1)

        # The log loss function requires X:(N, M) and T:(N,) in index label format;
        # transform_X_T converts the OHE labels T:(N, M) into index format.
        X, T = transform_X_T(X, T)
        _layer.T = T
        Logger.debug("%s: X is \n%s\nT is \n%s", name, X, T)

        # --------------------------------------------------------------------------------
        # Expected analytical gradient EG = dL/dX = (A-T)/N
        # --------------------------------------------------------------------------------
        A = softmax(X)
        EG = np.copy(A)
        EG[np.arange(N), T] -= TYPE_FLOAT(1)  # Shape (N,): subtract 1 only at the true-label positions
        EG /= TYPE_FLOAT(N)

        # --------------------------------------------------------------------------------
        # Total loss Z = np.sum(J)/N
        # Expected loss EL = -sum(log(A[n, T[n]])) / N
        # (J, P) = softmax_cross_entropy_log_loss(X, T) where J:shape(N,) is the
        # loss for each input and P is the activation softmax(X).
        # --------------------------------------------------------------------------------
        L = _layer.function(X)
        J, P = softmax_cross_entropy_log_loss(X, T)
        EL = np.array(-np.sum(logarithm(A[np.arange(N), T])) / N,
                      dtype=TYPE_FLOAT)

        # Constraint: A == P as they are both softmax(X)
        assert np.all(np.abs(A-P) < ACTIVATION_DIFF_ACCEPTANCE_VALUE), \
            f"Need A==P==softmax(X) but A=\n{A}\n P=\n{P}\n(A-P)=\n{(A-P)}\n"

        # Constraint: Log loss layer output L == sum(J)/N from the log loss function
        Z = np.array(np.sum(J) / N, dtype=TYPE_FLOAT)
        assert np.array_equal(L, Z), \
            f"Need log loss layer output L == sum(J)/N but L=\n{L}\nZ=\n{Z}."

        # Constraint: L/loss is close to expected loss EL.
        assert np.all(np.abs(EL-L) < LOSS_DIFF_ACCEPTANCE_VALUE), \
            "Need EL close to L but \nEL=\n{EL}\nL=\n{L}\n"

        # constraint: gradient_numerical() == numerical_jacobian(objective, X)
        # TODO: compare the diff to accommodate numerical errors.
        GN = _layer.gradient_numerical()  # [dL/dX] from the layer

        def objective(x):
            """Function to calculate the scalar loss L for cross entropy log loss"""
            j, p = softmax_cross_entropy_log_loss(x, T)
            return np.array(np.sum(j) / N, dtype=TYPE_FLOAT)

        EGN = numerical_jacobian(objective, X)  # Expected numerical dL/dX
        assert np.array_equal(GN[0], EGN), \
            f"GN[0]==EGN expected but GN[0] is \n%s\n EGN is \n%s\n" % (GN[0], EGN)

        # ================================================================================
        # Layer backward path
        # ================================================================================

        # constraint: Analytical gradient G: gradient() == EG == (P-T)/N.
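        # dY=1 is the upstream gradient of the scalar loss (dL/dL = 1).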
        dY = TYPE_FLOAT(1)
        G = _layer.gradient(dY)
        assert np.all(np.abs(G-EG) <= GRADIENT_DIFF_ACCEPTANCE_VALUE), \
            f"Layer gradient dL/dX \n{G} \nneeds to be \n{EG}."

        # constraint: Analytical gradient G is close to GN: gradient_numerical().
        assert \
            np.all(np.abs(G - GN[0]) <= GRADIENT_DIFF_ACCEPTANCE_VALUE) or \
            np.all(np.abs(G - GN[0]) <= np.abs(GRADIENT_DIFF_ACCEPTANCE_RATIO * GN[0])), \
            f"dX is \n{G}\nGN[0] is \n{GN[0]}\nRatio * GN[0] is \n{GRADIENT_DIFF_ACCEPTANCE_RATIO * GN[0]}.\n"

        # constraint: Gradient g of the log loss layer needs -1 < g < 1
        # abs(P-T) = abs(softmax(X)-T) cannot be > 1.
        assert np.all(np.abs(G) < 1), \
            f"Log loss layer gradient cannot be < -1 nor > 1 but\n{G}"
        assert np.all(np.abs(GN[0]) < (1+GRADIENT_DIFF_ACCEPTANCE_RATIO)), \
            f"Log loss layer gradient cannot be < -1 nor > 1 but\n{GN[0]}"

    profiler.disable()
    profiler.print_stats(sort="cumtime")
def test_020_cross_entropy_log_loss_1d(caplog):
    """
    Objective:
        Test the categorical log loss values for P in 1 dimension.

    Constraints:
        1. The numerical gradient gn = (-t * logarithm(p+h) + t * logarithm(p-h)) / 2h.
        2. The numerical gradient gn is within +/- u within the analytical g = -T/P.

    P: Probabilities from softmax of shape (M,)
    M: Number of nodes in the cross_entropy_log_loss layer.
    T: Labels

    Note:
        log(P=1) -> 0
        dlog(x)/dx = 1/x
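
        For example, P=[0, 1, 0] with T=[0, 1, 0] gives loss L = -log(1) = 0 and
        analytical gradient dL/dP = [0, -1, 0].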
    """
    def f(P: np.ndarray, T: np.ndarray):
        return np.sum(cross_entropy_log_loss(P, T))

    # caplog.set_level(logging.DEBUG, logger=Logger.name)

    h: TYPE_FLOAT = OFFSET_DELTA
    u: TYPE_FLOAT = GRADIENT_DIFF_ACCEPTANCE_VALUE

    # --------------------------------------------------------------------------------
    # For (P, T): P[index] = True/1, OHE label T[index] = 1 where
    # P=[0,0,0,...,1,...0], T = [0,0,0,...1,...0]. T[i] == 1
    #
    # Do not forget the Jacobian shape is (M,) and calculate each element.
    # 1. For T=1, loss L = -log(Pi) = 0 and dL/dP = -(1/Pi) = -1 is expected.
    # 2. For T=0, loss L = 0 and the numerical gradient (-0*log(p+h) + 0*log(p-h)) / 2h = 0 is expected.
    # --------------------------------------------------------------------------------
    M: TYPE_INT = np.random.randint(2, NUM_MAX_NODES)
    index: TYPE_INT = TYPE_INT(np.random.randint(
        0, M))  # Position of the true label in P
    P1 = np.zeros(M, dtype=TYPE_FLOAT)
    P1[index] = TYPE_FLOAT(1.0)
    T1 = np.zeros(M, dtype=TYPE_LABEL)
    T1[index] = TYPE_LABEL(1)

    # Analytically correct gradient for P=1, T=1
    AG = np.zeros_like(P1, dtype=TYPE_FLOAT)
    AG[index] = TYPE_FLOAT(-1)  # dL/dP = -1

    EGN1 = np.zeros_like(P1, dtype=TYPE_FLOAT)  # Expected numerical gradient
    EGN1[index] = (-1 * logarithm(TYPE_FLOAT(1.0 + h)) + TYPE_FLOAT(1) *
                   logarithm(TYPE_FLOAT(1.0 - h))) / TYPE_FLOAT(2 * h)
    assert np.all(np.abs(EGN1-AG) < u), \
        "Expected EGN-1<%s but %s\nEGN=\n%s" % (u, (EGN1-AG), EGN1)

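    # Numerical gradient of the summed loss f w.r.t. P1.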
    GN1 = numerical_jacobian(partial(f, T=T1), P1)
    assert np.all(np.abs(GN1-AG) < u), \
        "Expected GN-1<%s but %s\nGN=\n%s" % (u, (GN1-AG), GN1)

    # The numerical gradient gn = (-t * logarithm(p+h) + t * logarithm(p-h)) / 2h
    assert GN1.shape == EGN1.shape
    assert np.all(np.abs(EGN1-GN1) < u), \
        "Expected GN1==EGN1 but GN1-EGN1=\n%sP=\n%s\nT=%s\nEGN=\n%s\nGN=\n%s\n" \
        % (np.abs(GN1-EGN1), P1, T1, EGN1, GN1)

    # The numerical gradient gn is within +/- u within the analytical g = -T/P
    G1 = np.zeros_like(P1, dtype=TYPE_FLOAT)
    G1[T1 == 1] = -1 * (T1[index] / P1[index])
    # G1[T1 != 0] = 0
    check.equal(np.all(np.abs(G1 - GN1) < u), True,
                "G1-GN1 %s\n" % np.abs(G1 - GN1))

    # --------------------------------------------------------------------------------
    # For (P2, T2): P2[T2] = p = softmax(x) with x from np.random.uniform(), index label T2
    # --------------------------------------------------------------------------------
    for _ in range(NUM_MAX_TEST_TIMES):
        M = np.random.randint(2, NUM_MAX_NODES)  # M > 1
        T2 = TYPE_LABEL(np.random.randint(0, M))  # location of the truth
        P2 = np.zeros(M, dtype=TYPE_FLOAT)
        while not (x := TYPE_FLOAT(
                np.random.uniform(low=-BOUNDARY_SIGMOID,
                                  high=BOUNDARY_SIGMOID))):
            pass
        p = softmax(x)
        P2[T2] = p

        # --------------------------------------------------------------------------------
        # The Jacobian G shape is the same as P.shape.
        # G:[0, 0, ..., g, 0, ...] where g is the numerical gradient, close to the analytical -1/p.
        # --------------------------------------------------------------------------------
        EGN2 = np.zeros_like(P2, dtype=TYPE_FLOAT)  # Expected numerical gradient
        EGN2[T2] = TYPE_FLOAT(-1) * (logarithm(p + h) -
                                     logarithm(p - h)) / TYPE_FLOAT(2 * h)
        N2 = numerical_jacobian(partial(f, T=T2), P2)

        # The numerical gradient gn = (-t * logarithm(p+h) + t * logarithm(p-h)) / 2h
        assert N2.shape == EGN2.shape
        assert np.all(np.abs(EGN2-N2) < u), \
            f"Delta expected to be < {u} but \n{np.abs(EGN2-N2)}"

        G2 = np.zeros_like(P2, dtype=TYPE_FLOAT)
        G2[T2] = -1 / p

        # The numerical gradient gn is within +/- u within the analytical g = -T/P
        check.equal(np.all(np.abs(G2 - N2) < u), True,
                    "G2-N2 %s\n" % np.abs(G2 - N2))
def test_020_cross_entropy_log_loss_2d(caplog):
    """
    Objective:
        Test case for cross_entropy_log_loss(X, T) for X:shape(N,M), T:shape(N,)
    Expected:
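        The numerical gradient of the summed loss w.r.t. P should be close to both
        the expected central-difference value E and the analytical gradient -T/P
        (-1/p at the true-label positions), within GRADIENT_DIFF_ACCEPTANCE_VALUE.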
    """
    def f(P: np.ndarray, T: np.ndarray):
        """Loss function"""
        # For P.ndim==2 of shape (N, M), cross_entropy_log_loss() returns (N,).
        # Each of which has the loss for P[n].
        # If divided by P.shape[0] or N, the loss gets 1/N, which is wrong.
        # This is not a gradient function but a loss function.
        # return np.sum(cross_entropy_log_loss(P, T)) / P.shape[0]

        return np.sum(cross_entropy_log_loss(P, T))

    # caplog.set_level(logging.DEBUG, logger=Logger.name)

    h: TYPE_FLOAT = OFFSET_DELTA
    u: TYPE_FLOAT = GRADIENT_DIFF_ACCEPTANCE_VALUE

    for _ in range(NUM_MAX_TEST_TIMES):
        # --------------------------------------------------------------------------------
        # [2D test case]
        # P:(N, M) is a probability matrix where P[n][T[n]] = p for 0 <= n < N.
        # T:(N,)   is an index label where T[n] = m is the label as an integer, e.g. m=3 for the 3rd label.
        # Loss L = -log(p), hence dL/dP = -1/p at the true-label positions.
        #
        # Keep p away from 0. As p gets close to 0, log(p+/-h) gets large, e.g.
        # -11.512925464970229, hence log(p+/-h) / 2h explodes.
        # --------------------------------------------------------------------------------
        while not (x := TYPE_FLOAT(
                np.random.uniform(low=-BOUNDARY_SIGMOID,
                                  high=BOUNDARY_SIGMOID))):
            pass
        p = softmax(x)
        N = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M = np.random.randint(2, NUM_MAX_NODES)
        # label index, not OHE
        T = np.random.randint(0, M, N).astype(
            TYPE_LABEL)  # N rows of labels, max label value is M-1
        P = np.zeros((N, M)).astype(TYPE_FLOAT)
        P[range(N), T] = p  # Set p at the true-label position of each row
        E = np.zeros_like(P).astype(TYPE_FLOAT)
        # Expected numerical gradient at the true-label positions
        E[range(N), T] = (TYPE_FLOAT(-1) * logarithm(p + h) +
                          TYPE_FLOAT(1) * logarithm(p - h)) / (TYPE_FLOAT(2) * h)

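        # Numerical gradient of the summed loss f w.r.t. P.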
        G = numerical_jacobian(partial(f, T=T), P)
        assert E.shape == G.shape, \
            f"Jacobian shape is expected to be {E.shape} but {G.shape}."
        assert np.all(np.abs(E-G) < u), \
            f"Delta expected to be < {u} but \n{np.abs(E-G)}"

        A = np.zeros_like(P).astype(TYPE_FLOAT)
        A[range(N),  # Set p at random row position
          T] = -1 / p

        check.equal(np.all(np.abs(A - G) < u), True,
                    "A-G %s\n" % np.abs(A - G))
        index = np.random.randint(0, M)  # location of the truth
        while not (x := TYPE_FLOAT(
                np.random.uniform(low=-BOUNDARY_SIGMOID,
                                  high=BOUNDARY_SIGMOID))):
            pass
        p = softmax(x)
        P3 = np.zeros(M, dtype=TYPE_FLOAT)
        P3[index] = p
        T3 = np.zeros(M).astype(TYPE_LABEL)  # OHE index
        T3[index] = TYPE_LABEL(1)

        # --------------------------------------------------------------------------------
        # The Jacobian G shape is the same as P.shape.
        # --------------------------------------------------------------------------------
        EGN3 = np.zeros_like(P3, dtype=TYPE_FLOAT)  # Expected numerical gradient
        EGN3[index] = TYPE_FLOAT(-1 * logarithm(p + h) +
                                 1 * logarithm(p - h)) / TYPE_FLOAT(2 * h)
        N3 = numerical_jacobian(partial(f, T=T3), P3)
        assert N3.shape == EGN3.shape
        assert np.all(np.abs(EGN3-N3) < u), \
            f"Delta expected to be < {u} but \n{np.abs(EGN3-N3)}"

        G3 = np.zeros_like(P3, dtype=TYPE_FLOAT)
        G3[index] = -1 / p
        check.equal(np.all(np.abs(G3 - N3) < u), True,
                    "G3-N3 %s\n" % np.abs(G3 - N3))

        # --------------------------------------------------------------------------------
        # [1D test case]
        # For 1D OHE array P [0, 0, ..., 1, 0, ...] where Pi = 1.
        # For 1D OHE array T [0, 0, ..., 0, 1, ...] where Tj = 1 and i != j
def test_010_softmax_cross_entropy_log_loss_2d(caplog):
    """
    Objective:
        Test case for softmax_cross_entropy_log_loss(X, T) = -T * log(softmax(X))

        For the input X of shape (N,M) and T in index format of shape (N,),
        calculate the softmax log loss and verify the values are as expected.

    Expected:
        For P = softmax(X) = exp(X) / sum(exp(X)),
        _P = P[np.arange(N), T] selects the probability p for the correct label of each input x.
        Then -log(_P) should be almost the same as softmax_cross_entropy_log_loss(X, T).
        Almost, because finite floating point precision always introduces rounding errors.
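
        For example, X=[[0.5, 0.5]] gives P = softmax(X) = [[0.5, 0.5]] and
        loss -log(0.5) ≈ 0.693 (see Test case 01 below).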
    """
    # caplog.set_level(logging.DEBUG, logger=Logger.name)
    u = REFORMULA_DIFF_ACCEPTANCE_VALUE

    # --------------------------------------------------------------------------------
    # [Test case 01]
    # N: Batch size, M: Number of features in X
    # X:(N,M)=(1, 2). X=(x0, x1) where x0 == x1 == 0.5, for which softmax(X) generates
    # equal probabilities P=(p0, p1) with p0 == p1 == 0.5.
    # Expected:
    #   softmax(X) generates the same values as X.
    #   softmax_cross_entropy_log_loss(X, T) == -log(0.5)
    # --------------------------------------------------------------------------------
    X = np.array([[0.5, 0.5]]).astype(TYPE_FLOAT)
    T = np.array([1]).astype(TYPE_LABEL)
    E = -logarithm(np.array([0.5]).astype(TYPE_FLOAT))

    P = softmax(X)
    assert np.array_equal(X, P)

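    # J is the per-sample loss of shape (N,); the second return value is the softmax activation P.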
    J, _ = softmax_cross_entropy_log_loss(X, T)
    assert (E.shape == J.shape)
    assert np.all(np.abs(E - J) < u), \
        "Expected abs(E-J) < %s but \n%s\nE=\n%s\nT=%s\nX=\n%s\nJ=\n%s\n" \
        % (u, np.abs(E - J), E, T, X, J)
    assert np.all(np.abs(P - _) < u)

    # --------------------------------------------------------------------------------
    # [Test case 02]
    # For X:(N,M)
    # --------------------------------------------------------------------------------
    for _ in range(NUM_MAX_TEST_TIMES):
        # X(N, M), and T(N,) in index label format
        N = np.random.randint(1, NUM_MAX_BATCH_SIZE)
        M = np.random.randint(2, NUM_MAX_NODES)

        X = np.random.randn(N, M).astype(TYPE_FLOAT)
        T = np.random.randint(0, M, N).astype(TYPE_LABEL)
        Logger.debug("T is %s\nX is \n%s\n", T, X)

        # ----------------------------------------------------------------------
        # Expected value E = -logarithm(_P)
        # ----------------------------------------------------------------------
        P = softmax(X)
        _P = P[np.arange(N), T]  # Probability p for the correct label, which gives j = -log(p)

        E = -logarithm(_P)

        # ----------------------------------------------------------------------
        # Actual J should be close to E.
        # ----------------------------------------------------------------------
        J, _ = softmax_cross_entropy_log_loss(X, T)
        assert (E.shape == J.shape)
        assert np.all(np.abs(E-J) < u), \
            "Expected abs(E-J) < %s but \n%s\nE=\n%s\nT=%s\nX=\n%s\nJ=\n%s\n" \
            % (u, np.abs(E - J), E, T, X, J)

        # ----------------------------------------------------------------------
        # L = cross_entropy_log_loss(P, T) should be close to J
        # ----------------------------------------------------------------------
        L = cross_entropy_log_loss(P, T)
        assert (L.shape == J.shape)
        assert np.all(np.abs(L-J) < u), \
            "Expected abs(L-J) < %s but \n%s\nL=\n%s\nT=%s\nX=\n%s\nJ=\n%s\n" \
            % (u, np.abs(L - J), L, T, X, J)