Example #1
def test_cross_entropy_with_softmax():
    data1_shape = (1, 2)
    label1_shape = (1, )
    data2_shape = (1, 3)
    label2_shape = (1, )

    data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()

    cases = [
        {
            "input": [data1, label1],
            "output": expect1,
        },
        {
            "input": [data2, label2],
            "output": expect2,
        },
    ]
    opr_test(cases, F.cross_entropy_with_softmax)
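All of these snippets assume the same surrounding test harness. The setup below is a minimal sketch of what they rely on; the exact import paths for the opr_test and assertTensorClose helpers vary between MegEngine versions and are assumptions here, not part of the original examples.

from functools import partial

import numpy as np

import megengine.functional as F
from megengine import tensor

# opr_test feeds each case's "input" to the operator under test and compares
# the result against "output" or ref_fn; assertTensorClose is the numeric
# comparison helper used throughout. Both import paths below are assumed.
from helpers import opr_test
from megengine.test import assertTensorClose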
Example #2
def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
    data2_shape = (2, 3)
    label2_shape = (2, 3)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        assertTensorClose(x.numpy(), y, max_err=5e-4)

    np.random.seed(123)
    data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
    label1 = np.random.uniform(size=label1_shape).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
    label2 = np.random.uniform(size=label2_shape).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {
            "input": [data1, label1],
            "output": expect1,
        },
        {
            "input": [data2, label2],
            "output": expect2,
        },
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
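For reference, the hard-coded expect values above can be reproduced with plain NumPy. The sketch below assumes F.binary_cross_entropy averages the elementwise loss over every element; np_binary_cross_entropy is a name introduced here for illustration only.

def np_binary_cross_entropy(pred, label):
    # elementwise binary cross entropy, averaged over all elements
    return -np.mean(label * np.log(pred) + (1 - label) * np.log(1 - pred))

# With the seeded data above, np_binary_cross_entropy(data1, label1) comes out
# close to the hard-coded 0.6361.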
Example #3
def test_cross_entropy():
    data1_shape = (1, 2)
    label1_shape = (1, )
    data2_shape = (1, 3)
    label2_shape = (1, )

    data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = np.array([-np.log(0.5)], dtype=np.float32)

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = np.array([-np.log(0.4)], dtype=np.float32)

    cases = [
        {
            "input": [data1, label1],
            "output": expect1,
        },
        {
            "input": [data2, label2],
            "output": expect2,
        },
    ]
    opr_test(cases, F.cross_entropy)
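Judging by the expect values, F.cross_entropy here takes per-class probabilities together with integer class indices and returns the negative log probability of the target class, averaged over the batch. A NumPy counterpart for comparison (np_cross_entropy is introduced here for illustration):

def np_cross_entropy(pred, label):
    # pick the probability of the target class in each row, then average -log(p)
    return -np.mean(np.log(pred[np.arange(label.shape[0]), label]))

# np_cross_entropy(data2, label2) equals -np.log(0.4), i.e. expect2 above.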
Example #4
def test_eye():
    dtype = np.float32
    cases = [{"input": [10, 20]}, {"input": [20, 30]}]
    opr_test(cases,
             F.eye,
             ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
             dtype=dtype)
Example #5
def test_linspace():
    cases = [
        {
            "input": [1, 9, 9]
        },
        {
            "input": [3, 10, 8]
        },
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, stop, num: np.linspace(
            start, stop, num, dtype=np.float32),
    )

    cases = [
        {
            "input": [9, 1, 9]
        },
        {
            "input": [10, 3, 8]
        },
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, stop, num: np.linspace(
            start, stop, num, dtype=np.float32),
    )
Example #6
def common_test_reduce(opr, ref_opr):
    data1_shape = (5, 6, 7)
    data2_shape = (2, 9, 12)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    cases = [{"input": data1}, {"input": data2}]

    if opr not in (F.argmin, F.argmax):
        opr_test(cases, opr, ref_fn=ref_opr)

        axis = 2
        opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)

        axis = 2
        keepdims = True
        opr_test(
            cases,
            opr,
            ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=keepdims),
            axis=axis,
            keepdims=keepdims,
        )
    else:
        opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))

        axis = 2
        opr_test(
            cases,
            opr,
            ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
            axis=axis,
        )
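common_test_reduce is a shared helper rather than a test in its own right; it is presumably driven by small per-operator tests. The pairings below are illustrative assumptions, not the original driver code.

def test_sum():
    common_test_reduce(opr=F.sum, ref_opr=np.sum)


def test_mean():
    common_test_reduce(opr=F.mean, ref_opr=np.mean)


def test_argmax():
    common_test_reduce(opr=F.argmax, ref_opr=np.argmax)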
Example #7
def test_round():
    data1_shape = (15, )
    data2_shape = (25, )
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)

    cases = [{"input": data1}, {"input": data2}]
    opr_test(cases, F.round, ref_fn=np.round)
Example #8
def test_sqrt():
    d1_shape = (15, )
    d2_shape = (25, )
    d1 = np.random.random(d1_shape).astype(np.float32)
    d2 = np.random.random(d2_shape).astype(np.float32)

    cases = [{"input": d1}, {"input": d2}]
    opr_test(cases, F.sqrt, ref_fn=np.sqrt)
Example #9
def test_matrix_mul():
    shape1 = (2, 3)
    shape2 = (3, 4)
    shape3 = (4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
Example #10
def test_logsumexp():
    x = np.arange(10).astype(np.float32)
    expected = np.log(np.sum(np.exp(x)))
    cases = [{"input": x, "output": expected}]
    compare_fn = partial(assertTensorClose, allow_special_values=True)
    # large value check
    n = 100
    x = np.full(n, 10000, dtype=np.float32)
    expected = 10000 + np.log(n)
    cases.append({"input": x, "output": expected.astype(np.float32)})
    opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)

    # special value check
    x = np.array([np.inf], dtype=np.float32)
    expected = x
    cases = [{"input": x, "output": expected}]

    x = np.array([-np.inf, 0.0], dtype=np.float32)
    expected = np.zeros(1).astype(np.float32)
    cases.append({"input": x, "output": expected})
    opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)

    x = np.array([np.nan], dtype=np.float32)
    expected = x
    cases = [{"input": x, "output": expected}]

    x = np.array([-np.inf, 1], dtype=np.float32)
    expected = np.array([1.0], dtype=np.float32)
    cases.append({"input": x, "output": expected})

    opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)

    # keepdims check
    x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
    expected = np.array([[1e10], [-1e10]], dtype=np.float32)
    cases = [{"input": x, "output": expected}]
    x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]],
                 dtype=np.float32)
    expected = np.array([[1e10], [np.inf]], dtype=np.float32)
    cases.append({"input": x, "output": expected})
    opr_test(cases, F.logsumexp, axis=1, keepdims=True, compare_fn=compare_fn)

    # multiple axes check
    x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
    expected = np.array([1e10], dtype=np.float32)
    cases = [{"input": x, "output": expected}]
    x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]],
                 dtype=np.float32)
    expected = np.array([np.inf], dtype=np.float32)
    cases.append({"input": x, "output": expected})
    opr_test(cases,
             F.logsumexp,
             axis=(0, 1),
             keepdims=False,
             compare_fn=compare_fn)
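The large-value and special-value cases above are exactly what a numerically stable log-sum-exp has to handle. The NumPy sketch below illustrates the usual max-shift trick; it is a reference for understanding these cases, not the MegEngine implementation, and np_logsumexp is a name introduced here.

def np_logsumexp(x, axis, keepdims=False):
    # shift by the maximum so exp() cannot overflow for large inputs; replace a
    # non-finite maximum by 0 so inputs such as [inf] or [-inf, 0] stay correct
    m = np.max(x, axis=axis, keepdims=True)
    shift = np.where(np.isfinite(m), m, 0.0)
    out = shift + np.log(np.sum(np.exp(x - shift), axis=axis, keepdims=True))
    return out if keepdims else np.squeeze(out, axis=axis)

# e.g. np_logsumexp(np.full(100, 10000, dtype=np.float32), axis=0)
# is approximately 10000 + np.log(100), matching the large value check above.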
Example #11
def test_smooth_l1_loss():
    np.random.seed(123)
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        label = np.random.uniform(size=shape).astype(np.float32)
        diff = np.abs(data - label)
        expect = np.where(diff < 1, 0.5 * diff**2, diff - 0.5).mean()
        cases.append({"input": [data, label], "output": tensor(expect)})

    opr_test(cases, F.smooth_l1_loss)
Example #12
def test_where():
    maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
    xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
    xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]],
                   dtype=np.float32)
    yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)

    cases = [{"input": [maskv0, xv0, yv0]}, {"input": [maskv1, xv1, yv1]}]
    opr_test(cases, F.where, ref_fn=np.where)
Example #13
def test_concat():
    def get_data_shape(length: int):
        return (length, 2, 3)

    data1 = np.random.random(get_data_shape(5)).astype("float32")
    data2 = np.random.random(get_data_shape(6)).astype("float32")
    data3 = np.random.random(get_data_shape(7)).astype("float32")

    def run(data1, data2):
        return F.concat([data1, data2])

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
Example #14
def test_normalize():

    cases = [{
        "input": np.random.random((2, 3, 12, 12)).astype(np.float32)
    } for i in range(2)]

    def np_normalize(x, p=2, axis=None, eps=1e-12):
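        # note: omits np.abs(x); correct here only because the random inputs are non-negative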
        if axis is None:
            norm = np.sum(x**p)**(1.0 / p)
        else:
            norm = np.sum(x**p, axis=axis, keepdims=True)**(1.0 / p)
        return x / np.clip(norm, a_min=eps, a_max=np.inf)

    # Test L-2 norm along all dimensions
    opr_test(cases, F.normalize, ref_fn=np_normalize)

    # Test L-1 norm along all dimensions
    opr_test(cases,
             partial(F.normalize, p=1),
             ref_fn=partial(np_normalize, p=1))

    # Test L-2 norm along the second dimension
    opr_test(cases,
             partial(F.normalize, axis=1),
             ref_fn=partial(np_normalize, axis=1))

    # Test rows whose norm is 0 (exercises the eps clamp)
    cases[0]["input"][0, 0, 0, :] = 0
    cases[1]["input"][0, 0, 0, :] = 0
    opr_test(cases,
             partial(F.normalize, axis=3),
             ref_fn=partial(np_normalize, axis=3))
Example #15
def test_sort():
    data1_shape = (10, 3)
    data2_shape = (12, 2)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
    output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]

    cases = [
        {
            "input": data1,
            "output": output0
        },
        {
            "input": data2,
            "output": output1
        },
    ]
    opr_test(cases, F.sort)
Example #16
def test_flatten():
    data0_shape = (2, 3, 4, 5)
    data1_shape = (4, 5, 6, 7)
    data0 = np.random.random(data0_shape).astype(np.float32)
    data1 = np.random.random(data1_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    output0 = (2 * 3 * 4 * 5, )
    output1 = (4 * 5 * 6 * 7, )
    cases = [{
        "input": data0,
        "output": output0
    }, {
        "input": data1,
        "output": output1
    }]
    opr_test(cases, F.flatten, compare_fn=compare_fn)

    output0 = (2, 3 * 4 * 5)
    output1 = (4, 5 * 6 * 7)
    cases = [{
        "input": data0,
        "output": output0
    }, {
        "input": data1,
        "output": output1
    }]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)

    output0 = (2, 3, 4 * 5)
    output1 = (4, 5, 6 * 7)
    cases = [{
        "input": data0,
        "output": output0
    }, {
        "input": data1,
        "output": output1
    }]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)

    output0 = (2, 3 * 4, 5)
    output1 = (4, 5 * 6, 7)
    cases = [{
        "input": data0,
        "output": output0
    }, {
        "input": data1,
        "output": output1
    }]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
Example #17
def test_batched_matrix_mul():
    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # compare the i-th slice of the batched result with the single matmul reference
            assertTensorClose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.batched_matrix_mul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
Example #18
def test_broadcast_to():
    input1_shape = (20, 30)
    output1_shape = (30, 20, 30)
    data1 = np.random.random(input1_shape).astype(np.float32)

    input2_shape = (10, 20)
    output2_shape = (20, 10, 20)
    data2 = np.random.random(input2_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    cases = [
        {
            "input": [data1, output1_shape],
            "output": output1_shape
        },
        {
            "input": [data2, output2_shape],
            "output": output2_shape
        },
    ]
    opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
Example #19
def common_test_reduce(opr, ref_opr):
    data1_shape = (5, 6, 7)
    data2_shape = (2, 9, 12)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    cases = [{"input": data1}, {"input": data2}]

    if opr not in (F.argmin, F.argmax):
        # test default axis
        opr_test(cases, opr, ref_fn=ref_opr)
        # test every valid axis, including negative indices
        for axis in range(-3, 3):
            # test keepdims False
            opr_test(cases,
                     opr,
                     ref_fn=lambda x: ref_opr(x, axis=axis),
                     axis=axis)
            # test keepdims True
            opr_test(
                cases,
                opr,
                ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
                axis=axis,
                keepdims=True,
            )
    else:
        # test default axis
        opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
        # test every axis of the 3-d inputs
        for axis in range(0, 3):
            opr_test(
                cases,
                opr,
                ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
                axis=axis,
            )
Example #20
def test_hinge_loss():
    np.random.seed(123)
    # case with L1 norm
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.int32) - 1
        expect = np.clip(1 - data * label, 0, np.inf).sum(axis=1).mean()
        cases.append({"input": [data, label], "output": tensor(expect)})

    opr_test(cases, F.hinge_loss)

    # cases with L2 norm
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.int32) - 1
        expect = ((np.clip(1 - data * label, 0, np.inf)**2).sum(axis=1)).mean()
        cases.append({"input": [data, label], "output": tensor(expect)})

    def hinge_loss_with_l2_norm(pred, label):
        return F.hinge_loss(pred, label, "L2")

    opr_test(cases, hinge_loss_with_l2_norm)
Example #21
def test_arange():
    cases = [
        {
            "input": [1, 9, 1]
        },
        {
            "input": [2, 10, 2]
        },
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(
            start, end, step, dtype=np.float32),
    )

    cases = [
        {
            "input": [9, 1, -1]
        },
        {
            "input": [10, 2, -2]
        },
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(
            start, end, step, dtype=np.float32),
    )

    cases = [
        {
            "input": [9.3, 1.2, -0.5]
        },
        {
            "input": [10.3, 2.1, -1.7]
        },
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(
            start, end, step, dtype=np.float32),
    )