def _make_grad_hmm_normalizer(argnum, ans, log_pi0, log_Ps, ll):
    """Build the VJP for the HMM log-normalizer w.r.t. argument `argnum`.

    Runs a forward pass to get the alphas, then calls the compiled
    `grad_hmm_normalizer` kernel to fill the gradient buffers. Returns a
    function mapping the upstream scalar gradient `g` to the gradient of
    the selected argument (0: log_pi0, 1: log_Ps, 2: ll).
    """
    # Unbox the inputs if necessary (they may be autograd boxes).
    log_pi0 = getval(log_pi0)
    log_Ps = getval(log_Ps)
    ll = getval(ll)

    # The compiled kernels presumably require C-contiguous buffers — TODO
    # confirm. np.ascontiguousarray is the idiomatic copy-only-if-needed
    # form of the original hand-rolled contiguity check.
    log_pi0 = np.ascontiguousarray(log_pi0)
    log_Ps = np.ascontiguousarray(log_Ps)
    ll = np.ascontiguousarray(ll)

    # Output buffers filled in-place by grad_hmm_normalizer.
    dlog_pi0 = np.zeros_like(log_pi0)
    dlog_Ps = np.zeros_like(log_Ps)
    dll = np.zeros_like(ll)
    T, K = ll.shape

    # Forward pass to get alphas.
    alphas = np.zeros((T, K))
    forward_pass(log_pi0, log_Ps, ll, alphas)
    grad_hmm_normalizer(log_Ps, alphas, dlog_pi0, dlog_Ps, dll)

    if argnum == 0:
        return lambda g: g * dlog_pi0
    if argnum == 1:
        return lambda g: g * dlog_Ps
    if argnum == 2:
        return lambda g: g * dll
def _make_grad_hmm_normalizer(argnum, ans, pi0, Ps, ll):
    """Build the VJP for the HMM normalizer w.r.t. argument `argnum`.

    The compiled kernel differentiates with respect to *log* parameters,
    so the returned closures apply the chain rule to convert back to
    gradients w.r.t. `pi0` and `Ps` themselves.
    """
    # Ensure the inputs are C contiguous and unboxed.
    pi0, Ps, ll = to_c(pi0), to_c(Ps), to_c(ll)

    # Gradient buffers, filled in-place by grad_hmm_normalizer.
    dlog_pi0 = np.zeros_like(pi0)
    dlog_Ps = np.zeros_like(Ps)
    dll = np.zeros_like(ll)

    # Forward pass to obtain the alphas the gradient kernel needs.
    num_steps, num_states = ll.shape
    alphas = np.zeros((num_steps, num_states))
    forward_pass(pi0, Ps, ll, alphas)
    grad_hmm_normalizer(np.log(Ps), alphas, dlog_pi0, dlog_Ps, dll)

    # Account for the log transformation when building the VJP:
    # df/dP = df/dlogP * dlogP/dP = df/dlogP * 1 / P
    if argnum == 0:
        return lambda g: g * dlog_pi0 / pi0
    elif argnum == 1:
        return lambda g: g * dlog_Ps / Ps
    elif argnum == 2:
        return lambda g: g * dll
def test_grad_hmm_normalizer(T=1000, K=3):
    """Compare the compiled HMM-normalizer gradients against autograd.

    Fills the gradient buffers via forward_pass + grad_hmm_normalizer and
    checks each one against `grad(hmm_normalizer_np, argnum=i)`.
    """
    log_pi0, log_Ps, ll = make_parameters(T, K)
    dlog_pi0 = np.zeros_like(log_pi0)
    dlog_Ps = np.zeros_like(log_Ps)
    dll = np.zeros_like(ll)

    # Forward pass with the sampled log_pi0. The original hard-coded a
    # uniform -log(K) initial distribution here, which only matches the
    # autograd reference below when make_parameters returns a uniform
    # log_pi0 — using the actual parameter makes the test unconditionally
    # consistent.
    alphas = np.zeros((T, K))
    forward_pass(log_pi0, log_Ps, ll, alphas)
    grad_hmm_normalizer(log_Ps, alphas, dlog_pi0, dlog_Ps, dll)

    assert np.allclose(dlog_pi0, grad(hmm_normalizer_np, argnum=0)(log_pi0, log_Ps, ll))
    assert np.allclose(dlog_Ps, grad(hmm_normalizer_np, argnum=1)(log_pi0, log_Ps, ll))
    assert np.allclose(dll, grad(hmm_normalizer_np, argnum=2)(log_pi0, log_Ps, ll))
def test_grad_hmm_normalizer(T=10, K=3):
    """Compare the compiled HMM-normalizer gradients against autograd.

    The compiled kernel returns gradients w.r.t. log-parameters, so the
    buffers are divided by pi0 / Ps (chain rule) before comparison.
    """
    pi0, Ps, ll = make_parameters(T, K)

    # Buffers filled in-place by grad_hmm_normalizer.
    dlogpi0 = np.zeros_like(pi0)
    dlogPs = np.zeros_like(Ps)
    dll = np.zeros_like(ll)

    # Forward pass, then the compiled gradient kernel.
    alphas = np.zeros((T, K))
    forward_pass(pi0, Ps, ll, alphas)
    grad_hmm_normalizer(np.log(Ps), alphas, dlogpi0, dlogPs, dll)

    # Autograd reference gradients.
    ref = lambda i: grad(hmm_normalizer_np, argnum=i)(pi0, Ps, ll)
    assert np.allclose(dlogpi0 / pi0, ref(0))
    assert np.allclose(dlogPs / Ps, ref(1))
    assert np.allclose(dll, ref(2))
def _make_grad_hmm_normalizer(argnum, ans, log_pi0, log_Ps, ll):
    """Build the VJP for the HMM log-normalizer w.r.t. argument `argnum`.

    Returns a closure mapping the upstream scalar gradient `g` to the
    gradient of the selected argument (0: log_pi0, 1: log_Ps, 2: ll).
    """
    # Make sure everything is C contiguous and unboxed.
    log_pi0 = to_c(log_pi0)
    log_Ps = to_c(log_Ps)
    ll = to_c(ll)

    # Gradient buffers, filled in-place by grad_hmm_normalizer.
    dlog_pi0 = np.zeros_like(log_pi0)
    dlog_Ps = np.zeros_like(log_Ps)
    dll = np.zeros_like(ll)

    # Forward pass to get the alphas required by the gradient kernel.
    n_timesteps, n_states = ll.shape
    alphas = np.zeros((n_timesteps, n_states))
    forward_pass(log_pi0, log_Ps, ll, alphas)
    grad_hmm_normalizer(log_Ps, alphas, dlog_pi0, dlog_Ps, dll)

    # Dispatch on which argument's gradient was requested.
    grads = {0: dlog_pi0, 1: dlog_Ps, 2: dll}
    if argnum in grads:
        d = grads[argnum]
        return lambda g: g * d