Example #1
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
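
These excerpts assume helpers that are defined at module level in the CNTK ops test suite rather than inside each test function. A minimal sketch of those assumed definitions, for running the snippets standalone (names mirror the code above; exact import paths vary across CNTK versions):

import numpy as np
import cntk as C
from cntk import input_variable as I           # assumed alias used as I(...) below
from cntk.internal import sanitize_dtype_cntk  # may live elsewhere in older releases

AA = np.asarray                                # array-construction shorthand
PRECISION_TO_TYPE = {'float': np.float32, 'double': np.float64}
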
Example #2
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example #3
def test_op_reduce_all(input_data, axis, device_id, precision):
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision]
    # however there seems to be an issue with actual_forward below
    # that gets computed correctly but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward  = np.copy(input_op.eval(binding))
    dt = np.float32
    data = AA(input_data, dtype=dt)
    a = I(shape=data.shape,
          dtype=sanitize_dtype_cntk(dt),
          needs_gradient=True,
          name='a')
    # create batch
    value = [AA([data, data - 0.5], dtype=dt), AA([data + 0.25], dtype=dt)]
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis

    def max_bwd(x, f):
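        # sub-gradient of an all-axes max/min reduction: 1 where the element
        # equals the reduced value f, 0 elsewhere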
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f: yr[i] = 1
        return y

    ops = [
        (reduce_sum, lambda x: AA(sum(np.sum(xi) for xi in x)),
         lambda x, f: [np.ones_like(xi) for xi in x]),
        (reduce_max, lambda x: AA(max(np.max(xi) for xi in x)),
         lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_min, lambda x: AA(min(np.min(xi) for xi in x)),
         lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_mean,
         lambda x: AA(sum(np.sum(xi) for xi in x) / sum(xi.size for xi in x)),
         lambda x, f: [np.ones_like(xi) / sum(xj.size for xj in x) for xi in x]),
        (reduce_log_sum_exp,
         lambda x: AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))),
         lambda x, f: [np.exp(xi - f) for xi in x]),
        (reduce_prod, lambda x: AA(np.prod([np.prod(xi) for xi in x])),
         lambda x, f: [f / xi for xi in x])
    ]

    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value, expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)[0]
        actual_forward = np.copy(input_op.eval(binding))
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)
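
The bwd lambdas above encode the analytic gradient of each all-axes reduction; for reduce_log_sum_exp the gradient of f = log(sum(exp(x))) is exp(x - f). A NumPy-only sanity check of that formula against a finite difference (illustrative values, not part of the test):

import numpy as np

x = np.array([[1.0, 2.0], [0.5, -1.0]])
f = np.log(np.sum(np.exp(x)))       # forward: log-sum-exp over all axes
grad = np.exp(x - f)                # analytic gradient, as in the bwd lambda above

eps = 1e-6
x_pert = x.copy()
x_pert[0, 0] += eps
fd = (np.log(np.sum(np.exp(x_pert))) - f) / eps   # finite-difference estimate
assert np.allclose(grad[0, 0], fd, atol=1e-4)
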
Example #4
def test_op_reduce_all(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    data = AA(input_data, dtype=dt)
    a = C.sequence.input_variable(shape=data.shape,
                                  dtype=sanitize_dtype_cntk(dt),
                                  needs_gradient=True,
                                  name='a')
    # create batch
    value = [AA([data, data - 0.5], dtype=dt), AA([data + 0.25], dtype=dt)]
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis

    def max_bwd(x, f):
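        # sub-gradient of an all-axes max/min reduction: 1 where the element
        # equals the reduced value f, 0 elsewhere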
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f: yr[i] = 1
        return y

    ops = [
        (reduce_sum, lambda x: AA(sum(np.sum(xi) for xi in x)),
         lambda x, f: [np.ones_like(xi) for xi in x]),
        (reduce_max, lambda x: AA(max(np.max(xi) for xi in x)),
         lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_min, lambda x: AA(min(np.min(xi) for xi in x)),
         lambda x, f: [max_bwd(xi, f) for xi in x]),
        (reduce_mean,
         lambda x: AA(sum(np.sum(xi) for xi in x) / sum(xi.size for xi in x)),
         lambda x, f: [np.ones_like(xi) / sum(xj.size for xj in x) for xi in x]),
        (reduce_log_sum_exp,
         lambda x: AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))),
         lambda x, f: [np.exp(xi - f) for xi in x]),
        (reduce_prod, lambda x: AA(np.prod([np.prod(xi) for xi in x])),
         lambda x, f: [f / xi for xi in x])
    ]

    for op, fwd, bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value, expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)
        actual_forward = input_op.eval(binding)
        assert np.allclose(actual_forward, expected_forward)
        for ab, eb in zip(actual_backward, expected_backward):
            assert np.allclose(ab, eb)
Example #5
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    # dt = PRECISION_TO_TYPE[precision]
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision]
    # however there seems to be an issue with actual_forward below
    # that gets computed correctly but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses
    # actual_forward  = np.copy(input_op.eval())
    dt = np.float32
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example #6
def test_op_reduce_mean_all_constant(input_data, axis, device_id, precision):
    # dt = PRECISION_TO_TYPE[precision]
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision]
    # however there seems to be an issue with actual_forward below
    # that gets computed correctly but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses 
    # actual_forward  = np.copy(input_op.eval())
    dt = np.float32
    value = AA(input_data, dtype=dt)
    from .. import reduce_mean
    from cntk import Axis, Constant
    a = Constant(value, name='a')
    input_op = reduce_mean(a, axis=Axis.all_axes())
    expected_forward = AA(np.mean(value))
    actual_forward = input_op.eval()
    assert np.allclose(actual_forward, expected_forward)
Example #7
def test_op_reduce_all(input_data, axis, device_id, precision):
    # FIXME: we'd like to do dt = PRECISION_TO_TYPE[precision]
    # however there seems to be an issue with actual_forward below
    # that gets computed correctly but by the time np.allclose executes
    # it contains garbage values. The problem goes away if one uses 
    # actual_forward  = np.copy(input_op.eval(binding))
    dt = np.float32
    data = AA(input_data, dtype=dt)
    a = I(shape=data.shape,
          dtype=sanitize_dtype_cntk(dt),
          needs_gradient=True,
          name='a')
    # create batch
    value = [AA([data,data-0.5], dtype=dt),AA([data+0.25], dtype=dt)]
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis
    def max_bwd(x,f):
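        # sub-gradient of an all-axes max/min reduction: 1 where the element
        # equals the reduced value f, 0 elsewhere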
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f: yr[i] = 1
        return y

    ops = [ (reduce_sum,         lambda x:AA(sum(np.sum(xi) for xi in x)),                           lambda x,f:[np.ones_like(xi) for xi in x]),
            (reduce_max,         lambda x:AA(max(np.max(xi) for xi in x)),                           lambda x,f:[max_bwd(xi,f) for xi in x]),
            (reduce_min,         lambda x:AA(min(np.min(xi) for xi in x)),                           lambda x,f:[max_bwd(xi,f) for xi in x]),
            (reduce_mean,        lambda x:AA(sum(np.sum(xi) for xi in x)/sum(xi.size  for xi in x)), lambda x,f:[np.ones_like(xi)/sum(xj.size for xj in x) for xi in x]),
            (reduce_log_sum_exp, lambda x:AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))),           lambda x,f:[np.exp(xi-f)     for xi in x]),
            (reduce_prod,        lambda x:AA(np.prod([np.prod(xi) for xi in x])),                    lambda x,f:[f/xi             for xi in x])
            ]
    
    for op,fwd,bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value,expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)[0]
        actual_forward  = np.copy(input_op.eval(binding))
        assert np.allclose(actual_forward, expected_forward)
        for ab,eb in zip (actual_backward, expected_backward):
            assert np.allclose(ab, eb)
Example #8
def test_op_reduce_all(input_data, axis, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    data = AA(input_data, dtype=dt)
    a = C.sequence.input_variable(shape=data.shape,
                         dtype=sanitize_dtype_cntk(dt),
                         needs_gradient=True,
                         name='a')
    # create batch
    value = [AA([data,data-0.5], dtype=dt),AA([data+0.25], dtype=dt)]
    from .. import reduce_sum, reduce_max, reduce_min, reduce_mean, reduce_log_sum_exp, reduce_prod
    from cntk import Axis
    def max_bwd(x,f):
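        # sub-gradient of an all-axes max/min reduction: 1 where the element
        # equals the reduced value f, 0 elsewhere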
        y = np.zeros_like(x)
        yr = y.ravel()
        xr = x.ravel()
        for i in range(x.size):
            if xr[i] == f: yr[i] = 1
        return y

    ops = [ (reduce_sum,         lambda x:AA(sum(np.sum(xi) for xi in x)),                           lambda x,f:[np.ones_like(xi) for xi in x]),
            (reduce_max,         lambda x:AA(max(np.max(xi) for xi in x)),                           lambda x,f:[max_bwd(xi,f) for xi in x]),
            (reduce_min,         lambda x:AA(min(np.min(xi) for xi in x)),                           lambda x,f:[max_bwd(xi,f) for xi in x]),
            (reduce_mean,        lambda x:AA(sum(np.sum(xi) for xi in x)/sum(xi.size  for xi in x)), lambda x,f:[np.ones_like(xi)/sum(xj.size for xj in x) for xi in x]),
            (reduce_log_sum_exp, lambda x:AA(np.log(sum(np.sum(np.exp(xi)) for xi in x))),           lambda x,f:[np.exp(xi-f)     for xi in x]),
            (reduce_prod,        lambda x:AA(np.prod([np.prod(xi) for xi in x])),                    lambda x,f:[f/xi             for xi in x])
            ]

    for op,fwd,bwd in ops:
        input_op = op(a, axis=Axis.all_axes())
        expected_forward = fwd(value)
        expected_backward = bwd(value,expected_forward)
        binding = {a: value}
        actual_backward = input_op.grad(binding)
        actual_forward  = input_op.eval(binding)
        assert np.allclose(actual_forward, expected_forward)
        for ab,eb in zip (actual_backward, expected_backward):
            assert np.allclose(ab, eb)
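
All of the test functions above expect input_data, axis, device_id and precision to be supplied by pytest parametrization at module level. A minimal driver sketch under that assumption (the parameter lists and names here are illustrative, not the ones from the CNTK repository):

import numpy as np
import pytest

PRECISION_TO_TYPE = {'float': np.float32, 'double': np.float64}

REDUCE_TEST_CASES = [
    # (input_data, axis)
    ([[1, 2], [4, 5]], 0),
    ([[1, 2], [4, 5]], 1),
]

@pytest.mark.parametrize("input_data, axis", REDUCE_TEST_CASES)
@pytest.mark.parametrize("precision", ['float', 'double'])
def test_reduce_driver_sketch(input_data, axis, precision):
    dt = PRECISION_TO_TYPE[precision]
    data = np.asarray(input_data, dtype=dt)
    assert data.dtype == dt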