def test_lambda_rank(grad, value, output, gain, device_id, precision):
    """Check ``lambda_rank`` forward value and input gradient.

    Parametrized (pytest) with precomputed expected value/gradient for the
    given scores and gains; all samples are assigned to a single query group.

    Args:
        grad: expected gradient w.r.t. the score input.
        value: expected forward (loss) value.
        output: raw model scores fed to the loss.
        gain: relevance gains for each sample.
        device_id: pytest fixture selecting the compute device (unused here).
        precision: key into PRECISION_TO_TYPE selecting the numpy dtype.
    """
    dt = PRECISION_TO_TYPE[precision]

    # Reshape to (num_samples, 1, 1): one scalar per sequence element.
    score = AA(output, dtype=dt).reshape(-1, 1, 1)
    gain = AA(gain, dtype=dt).reshape(-1, 1, 1)
    # Identical group ids for every sample -> one ranking group.
    group = np.ones_like(score).reshape(-1, 1, 1)

    expected_value = AA(value, dtype=dt)
    expected_grad = AA(grad, dtype=dt)

    from cntk.losses import lambda_rank

    # NOTE(review): the deprecated free function `input()` (which also
    # shadows the Python builtin) was replaced with `C.input_variable`,
    # matching the sibling test in this file.
    g = C.input_variable((1,))
    s = C.input_variable((1,), needs_gradient=True)
    n = C.input_variable((1,))
    f = lambda_rank(s, n, g)

    actual_grad, actual_value = f.grad(
        {s: score, n: gain, g: group}, [s], [f.output])

    assert np.allclose(actual_value, expected_value)
    assert np.allclose(actual_grad, expected_grad)
def test_lambda_rank(grad, value, output, gain, device_id, precision):
    """Check ``lambda_rank`` forward value and input gradient.

    Parametrized (pytest) with precomputed expected value/gradient for the
    given scores and gains; all samples are assigned to a single query group.

    Args:
        grad: expected gradient w.r.t. the score input.
        value: expected forward (loss) value.
        output: raw model scores fed to the loss.
        gain: relevance gains for each sample.
        device_id: pytest fixture selecting the compute device (unused here).
        precision: key into PRECISION_TO_TYPE selecting the numpy dtype.
    """
    dt = PRECISION_TO_TYPE[precision]

    # Reshape to (num_samples, 1, 1): one scalar per sequence element.
    score = AA(output, dtype=dt).reshape(-1, 1, 1)
    gain = AA(gain, dtype=dt).reshape(-1, 1, 1)
    # Identical group ids for every sample -> one ranking group.
    group = np.ones_like(score).reshape(-1, 1, 1)

    expected_value = AA(value, dtype=dt)
    expected_grad = AA(grad, dtype=dt)

    from cntk.losses import lambda_rank

    g = C.input_variable((1,))
    s = C.input_variable((1,), needs_gradient=True)
    n = C.input_variable((1,))
    f = lambda_rank(s, n, g)

    # Gradient is requested only for the score input; forward value is also
    # returned because f.output is listed as an output to evaluate.
    actual_grad, actual_value = f.grad(
        {s: score, n: gain, g: group}, [s], [f.output])

    assert np.allclose(actual_value, expected_value)
    assert np.allclose(actual_grad, expected_grad)