def test_case():
    w_val = np_utils.random_vec((in_features, out_features))
    w = torch.tensor(w_val, dtype=torch.float64, requires_grad=True)
    b_val = np_utils.random_vec((out_features,))
    b = torch.tensor(b_val, dtype=torch.float64, requires_grad=True)
    x_val = np_utils.random_vec((batch_size, in_features))
    x = torch.tensor(x_val, dtype=torch.float64, requires_grad=True)
    torch.autograd.gradcheck(linear, (x, w, b))
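# Note on the gradcheck tests in this file: torch.autograd.gradcheck
# compares the analytic backward pass against central finite differences,
# which is numerically reliable only in double precision; hence every
# gradcheck input is built as a float64 tensor with requires_grad=True.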
def sample():
    a = torch.Tensor(np_utils.random_vec((batch_size, emb_size)))
    b = torch.Tensor(np_utils.random_vec((batch_size, emb_size)))
    c = float(np_utils.random_vec([], low=0.0, high=1.0))
    logger.debug("c: {}".format(c))
    # Left-cancellation law of Mobius addition: (-a) (+)_c (a (+)_c b) == b.
    res = mobius.add(-a, mobius.add(a, b, c), c).data.numpy()
    expected = b.data.numpy()
    assert res.shape == expected.shape
    assert np.allclose(res, expected)
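# For reference, a minimal NumPy sketch of the operation sample() relies on,
# assuming mobius.add implements the standard Mobius addition on the
# Poincare ball with curvature parameter c (as in Ganea et al., 2018,
# "Hyperbolic Neural Networks"). The helper below is illustrative only and
# is not part of the library under test.
def _mobius_add_np(x, y, c):
    # x (+)_c y = ((1 + 2c<x,y> + c||y||^2) x + (1 - c||x||^2) y)
    #             / (1 + 2c<x,y> + c^2 ||x||^2 ||y||^2)
    xy = np.sum(x * y, axis=-1, keepdims=True)
    x2 = np.sum(x * x, axis=-1, keepdims=True)
    y2 = np.sum(y * y, axis=-1, keepdims=True)
    num = (1 + 2 * c * xy + c * y2) * x + (1 - c * x2) * y
    den = 1 + 2 * c * xy + c ** 2 * x2 * y2
    return num / den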
def test_case():
    p_val = np_utils.random_vec((K, emb_size))
    a_val = np_utils.random_vec((K, emb_size))
    c = 1.0
    p = torch.tensor(p_val, dtype=torch.float64, requires_grad=True)
    a = torch.tensor(a_val, dtype=torch.float64, requires_grad=True)
    x_val = np_utils.random_vec((batch_size, emb_size))
    x = torch.tensor(x_val, dtype=torch.float64, requires_grad=True)
    torch.autograd.gradcheck(mobius.logits, (x, p, a, c))
def sample():
    aval = np_utils.random_vec((batch_size, emb_size), low=-0.01)
    bval = np_utils.random_vec((batch_size, emb_size), low=-0.01)
    a = torch.Tensor(aval)
    b = torch.Tensor(bval)
    # cval = np_utils.random_vec((1,), low=0.0, high=1.0)
    # TODO: Why does this fail with random c?
    cval = 1.0
    logger.info("c: {}".format(cval))
    res = mobius.squared_distance(a, b, cval).data.numpy()
    expected = np_utils.squared_distance(aval, bval, cval)
    assert res.shape == expected.shape
    assert np.allclose(res, expected)
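# A hypothetical NumPy sketch of what the distance under test is assumed
# to compute: the squared Poincare-ball distance
# d_c(a, b) = (2 / sqrt(c)) * artanh(sqrt(c) * ||(-a) (+)_c b||),
# built on the _mobius_add_np helper above. Illustrative only.
def _squared_distance_np(a, b, c):
    diff = _mobius_add_np(-a, b, c)
    norm = np.linalg.norm(diff, axis=-1)
    dist = (2.0 / np.sqrt(c)) * np.arctanh(np.sqrt(c) * norm)
    return dist ** 2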
def test_logits():
    batch_size = 4
    emb_size = 5
    K = 3
    p_val = np_utils.random_vec((K, emb_size))
    a_val = np_utils.random_vec((K, emb_size))
    c = 1.0
    p = torch.tensor(p_val).double()
    a = torch.tensor(a_val).double()
    x_val = np_utils.random_vec((batch_size, emb_size))
    x = torch.tensor(x_val).double()
    logits = mobius.logits(x, p, a, c)
    # One logit per class for each example in the batch.
    assert tuple(logits.shape) == (batch_size, K)
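# A sketch of the hyperbolic multinomial logistic regression that
# mobius.logits is assumed to implement (Ganea et al., 2018): for each
# class k with offset p_k and normal a_k,
# logit_k(x) = (lambda_c(p_k) * ||a_k|| / sqrt(c))
#              * asinh(2 sqrt(c) <(-p_k) (+)_c x, a_k>
#                      / ((1 - c ||(-p_k) (+)_c x||^2) ||a_k||)),
# where lambda_c(p) = 2 / (1 - c ||p||^2) is the conformal factor.
# Illustrative only; the library may organize the computation differently.
def _logits_np(x, p, a, c):
    out = np.zeros((x.shape[0], p.shape[0]))
    for k in range(p.shape[0]):
        z = _mobius_add_np(-p[k], x, c)              # (batch, emb)
        z2 = np.sum(z * z, axis=-1)                  # (batch,)
        a_norm = np.linalg.norm(a[k])
        lam = 2.0 / (1.0 - c * np.sum(p[k] * p[k]))  # conformal factor at p_k
        inner = 2.0 * np.sqrt(c) * (z @ a[k]) / ((1.0 - c * z2) * a_norm)
        out[:, k] = (lam * a_norm / np.sqrt(c)) * np.arcsinh(inner)
    return out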
def test_case():
    # c_val = np_utils.random_vec((1,), low=0.5, high=1.0)
    c_val = 1.0
    logger.debug("c={}".format(c_val))
    x = torch.Tensor(np_utils.random_vec((batch_size, in_features)))
    hnn_linear = hnn.Linear(in_features, out_features, c=c_val)
    M = hnn_linear.weight.data.numpy()
    b = hnn_linear.bias.data.numpy()
    # Compare the layer against the NumPy reference implementation.
    np_res = np_utils.Linear(x.data.numpy(), M, b, c_val)
    torch_res = hnn_linear(x)
    assert np.allclose(np_res, torch_res.data.numpy(), atol=1e-7)
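# A sketch of the Mobius affine map that hnn.Linear is assumed to implement
# (Ganea et al., 2018): the Mobius matrix-vector product followed by a
# Mobius bias addition. The (in_features, out_features) weight layout is an
# assumption; the library may use the transpose.
def _mobius_linear_np(x, M, b, c):
    x_norm = np.linalg.norm(x, axis=-1, keepdims=True)
    mx = x @ M
    mx_norm = np.linalg.norm(mx, axis=-1, keepdims=True)
    # M (x)_c x = (1/sqrt(c)) tanh((||Mx||/||x||) artanh(sqrt(c) ||x||)) Mx/||Mx||
    res = (1.0 / np.sqrt(c)) * np.tanh(
        (mx_norm / x_norm) * np.arctanh(np.sqrt(c) * x_norm)) * (mx / mx_norm)
    return _mobius_add_np(res, b, c)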
def test_case():
    x = torch.tensor(
        np_utils.random_vec((batch_size, in_features)),
        dtype=torch.float64, requires_grad=True)
    torch.autograd.gradcheck(hnn_dense, x)
def test_case():
    x = torch.tensor(np_utils.random_vec((batch_size, in_features))).double()
    # The dense layer should match a tanh activation applied to the
    # output of the corresponding linear layer.
    res_dense = hnn_dense(x).data.numpy()
    res_act_linear = torch.tanh(hnn_linear(x)).data.numpy()
    assert np.allclose(res_dense, res_act_linear)
def test_case():
    x = torch.tensor(
        np_utils.random_vec((batch_size, timesteps, emb_size)),
        dtype=torch.float64, requires_grad=True)
    torch.autograd.gradcheck(hnn_rnn, x)