def test_multiclass_oracle_c():
    """Check the cython multiclass oracle against the python reference.

    Runs both implementations from identical uniform initializations on each
    loss matrix produced by ``create_losses``, and checks that every returned
    iterate and the duality-gap trace agree, and that the final duality gap
    lands in ``(0, eps)``.
    """
    n_states = 10
    Losses = create_losses(n_states)
    for Loss in Losses:
        # BUG FIX: the loop variable `Loss` was immediately overwritten with a
        # random matrix, silently discarding the losses from create_losses
        # (test_multiclass_oracle_p uses them directly).
        scores = np.random.random_sample((n_states,))
        L = np.max(Loss) * np.log(n_states)
        eta = np.log(n_states) / (2 * L)
        # initialize variables
        nu = np.ones(n_states) / n_states
        p = np.ones(n_states) / n_states
        eps = 1e-2
        max_iter = int(4 * L / eps)
        Logs = list(np.arange(0, max_iter))
        mu_p, q_p, nu_p, p_p, dual_gaps_p, _ = maxmin_spmp_multiclass_p(
            nu, p, scores, Loss, max_iter, eta, Logs=Logs)
        # re-initialize so both solvers start from the same point
        nu = np.ones(n_states) / n_states
        p = np.ones(n_states) / n_states
        mu_c, q_c, nu_c, p_c, dual_gaps_c = multiclass_oracle_c(
            nu, p, scores, Loss, max_iter, eta)
        mu_c, q_c = np.array(mu_c), np.array(q_c)
        nu_c, p_c = np.array(nu_c), np.array(p_c)
        dual_gaps_c = np.array(dual_gaps_c)
        # BUG FIX: rtol=7 means a 700% relative tolerance, which made all of
        # these assertions vacuous; use a meaningful tolerance instead.
        assert_allclose(mu_c, mu_p, rtol=1e-7)
        assert_allclose(q_c, q_p, rtol=1e-7)
        assert_allclose(nu_c, nu_p, rtol=1e-7)
        assert_allclose(p_c, p_p, rtol=1e-7)
        assert_allclose(dual_gaps_c, dual_gaps_p, rtol=1e-7)
        assert 0 < dual_gaps_c[-1] < eps
def test_multiclass_oracle_p():
    """Check the python SPMP multiclass oracle against the cvxopt solver.

    For each loss matrix, SPMP iterates are logged at iteration counts chosen
    so the theoretical duality gap drops below each target ``eps``; the
    logged energies must then match the cvxopt optimum to that accuracy.
    """
    # N_states, Precisions = [2, 5, 10], [1, 2, 3]
    N_states, Precisions = [5], [1, 2]
    for n_states in N_states:
        Losses = create_losses(n_states)
        for Loss in Losses:
            scores = np.random.random_sample((n_states,))
            # run the cvxopt reference solver
            mu_cx, en_cx, _, _ = maxmin_multiclass_cvxopt(scores, Loss)
            L = np.max(Loss) * np.log(n_states)
            eta = np.log(n_states) / (2 * L)
            # initialize variables
            nu = np.ones(n_states) / n_states
            p = np.ones(n_states) / n_states
            Eps = [1 / (10 ** precision) for precision in Precisions]
            Logs = [int(4 * L / eps) for eps in Eps]
            max_iter = Logs[-1]
            mu_p, q_avg, _, _, _, En_p = maxmin_spmp_multiclass_p(
                nu, p, scores, Loss, max_iter, eta, Logs=Logs)
            for i, en_p in enumerate(En_p):
                # BUG FIX: rtol was Precisions[i] (i.e. 1 or 2 -> 100%/200%
                # relative tolerance), a vacuous check; the intended
                # tolerance is Eps[i] = 10**-precision.
                assert_allclose(en_p, en_cx, rtol=Eps[i])
def test_apply_exp2():
    """In-place cython exponential of a 10x10 matrix matches ``np.exp``."""
    rng = np.random.RandomState(0)
    mat = rng.random_sample((10, 10)).astype(dtype, copy=False)
    buf = mat.copy()
    apply_exp2(buf, 10, 10)  # exponentiates buf in place
    assert_allclose(buf, np.exp(mat), rtol=RTOL[dtype])
def test_sum_product_chain():
    """Cython sum-product on a chain must match the python reference.

    Uses large-magnitude random scores to stress numerical stability, fills
    node and edge marginal buffers with both implementations, and prints the
    measured speedup.
    """
    n_states = 10
    length = 20
    unary_scores = 10e4 * (np.random.random_sample((length, n_states)) - 0.5)
    pairwise_scores = 10e4 * (np.random.random_sample(
        (length - 1, n_states, n_states)) - 0.5)

    start = time.time()
    n_p = np.empty([length, n_states])
    e_p = np.empty([length - 1, n_states, n_states])
    sum_product_p(unary_scores, pairwise_scores, n_p, e_p)
    slow = time.time() - start

    start = time.time()
    n_c = np.empty([length, n_states])
    e_c = np.empty([length - 1, n_states, n_states])
    sum_product_c(unary_scores, pairwise_scores, n_c, e_c)
    fast = time.time() - start

    # BUG FIX: rtol=7 (a 700% relative tolerance) made these comparisons
    # vacuous; the unused `eps` local and dead debugging code (pdb call,
    # commented-out assertions) were removed.
    assert_allclose(n_c, n_p, rtol=1e-7)
    assert_allclose(e_c, e_p, rtol=1e-7)
    print("Speedup is {}".format(slow / fast))
def test_softmax2():
    """Cython row-wise weighted softmax matches the numpy formula."""
    rng = np.random.RandomState(0)
    logits = rng.random_sample((10, 10)).astype(dtype, copy=False)
    weights = rng.random_sample((10, 10)).astype(dtype, copy=False)
    # reference: row-normalized weights * exp(logits)
    ref = weights * np.exp(logits)
    ref = ref / ref.sum(axis=1, keepdims=True)
    out = logits.copy()
    softmax2_c(out, weights)  # overwrites out in place
    assert_allclose(out, ref, rtol=RTOL[dtype])
def test_linear_comb_memview():
    """In-place cython linear combination ``a := alpha*a + beta*b``."""
    rng = np.random.RandomState(0)
    vec_a = rng.random_sample(10).astype(dtype, copy=False)
    vec_b = rng.random_sample(10).astype(dtype, copy=False)
    coef_a = rng.random_sample()
    coef_b = rng.random_sample()
    out = vec_a.copy()
    linear_comb_c(coef_a, coef_b, out, vec_b)
    assert_allclose(out, coef_a * vec_a + coef_b * vec_b, rtol=RTOL[dtype])
def test_nrm2(dtype):
    """Cython nrm2 memview wrapper agrees with ``np.linalg.norm``."""
    nrm2 = _nrm2_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    vec = rng.random_sample(10).astype(dtype, copy=False)
    assert_allclose(nrm2(vec), np.linalg.norm(vec), rtol=RTOL[dtype])
def test_asum(dtype):
    """Cython asum memview wrapper returns the sum of absolute values."""
    asum = _asum_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    vec = rng.random_sample(10).astype(dtype, copy=False)
    assert_allclose(asum(vec), np.abs(vec).sum(), rtol=RTOL[dtype])
def test_scal(dtype):
    """Cython scal scales a vector in place by ``alpha``."""
    scal = _scal_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    vec = rng.random_sample(10).astype(dtype, copy=False)
    factor = 2.5
    ref = factor * vec  # computed before the in-place update
    scal(factor, vec)
    assert_allclose(vec, ref, rtol=RTOL[dtype])
def test_copy(dtype):
    """Cython copy duplicates the source vector into the destination."""
    copy = _copy_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    src = rng.random_sample(10).astype(dtype, copy=False)
    dst = np.empty_like(src)
    snapshot = src.copy()  # taken before the call, in case src is touched
    copy(src, dst)
    assert_allclose(dst, snapshot, rtol=RTOL[dtype])
def test_dot(dtype):
    """Cython dot memview wrapper matches the numpy inner product."""
    dot = _dot_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    u = rng.random_sample(10).astype(dtype, copy=False)
    v = rng.random_sample(10).astype(dtype, copy=False)
    assert_allclose(dot(u, v), u.dot(v), rtol=RTOL[dtype])
def test_axpy(dtype):
    """Cython axpy computes ``y := alpha*x + y`` in place."""
    axpy = _axpy_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    x_vec = rng.random_sample(10).astype(dtype, copy=False)
    y_vec = rng.random_sample(10).astype(dtype, copy=False)
    coef = 2.5
    ref = coef * x_vec + y_vec  # snapshot before y is overwritten
    axpy(coef, x_vec, y_vec)
    assert_allclose(y_vec, ref, rtol=RTOL[dtype])
def test_augment_edges():
    """augment_edges writes ``eta * pairwise + nu`` into every edge slice."""
    rng = np.random.RandomState(0)
    bscores = rng.random_sample((9, 10, 10)).astype(dtype, copy=False)
    pairwise_potentials = rng.random_sample((10, 10)).astype(dtype, copy=False)
    nu_edges = rng.random_sample((9, 10, 10)).astype(dtype, copy=False)
    eta = 0.1
    # broadcasting over the leading axis replaces an explicit np.repeat
    expected = eta * pairwise_potentials[np.newaxis, :, :] + nu_edges
    augment_edges(bscores, pairwise_potentials, nu_edges, eta, 10, 10)
    assert_allclose(bscores, expected, rtol=RTOL[dtype])
def test_ger(dtype, order):
    """Cython ger performs the rank-1 update ``A := alpha*x*y^T + A``."""
    ger = _ger_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    x_vec = rng.random_sample(10).astype(dtype, copy=False)
    y_vec = rng.random_sample(20).astype(dtype, copy=False)
    mat = np.asarray(rng.random_sample((10, 20)).astype(dtype, copy=False),
                     order=ORDER[order])
    coef = 2.5
    ref = coef * np.outer(x_vec, y_vec) + mat  # before the in-place update
    ger(coef, x_vec, y_vec, mat)
    assert_allclose(mat, ref, rtol=RTOL[dtype])
def test_gemv(dtype, opA, transA, order):
    """Cython gemv computes ``y := alpha*op(A)@x + beta*y`` in place."""
    gemv = _gemv_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    mat = np.asarray(
        opA(rng.random_sample((20, 10)).astype(dtype, copy=False)),
        order=ORDER[order])
    x_vec = rng.random_sample(10).astype(dtype, copy=False)
    y_vec = rng.random_sample(20).astype(dtype, copy=False)
    alpha, beta = 2.5, -0.5
    ref = alpha * opA(mat).dot(x_vec) + beta * y_vec  # before overwrite
    gemv(transA, alpha, mat, x_vec, beta, y_vec)
    assert_allclose(y_vec, ref, rtol=RTOL[dtype])
def test_linear_comb2():
    """linear_comb2 computes ``a := alpha*a + beta*b`` (or ``beta*exp(b)``).

    Exercises both values of the exponentiation flag.
    """
    rng = np.random.RandomState(0)
    mat_a = rng.random_sample((10, 10)).astype(dtype, copy=False)
    mat_b = rng.random_sample((10, 10)).astype(dtype, copy=False)
    w_a, w_b = 0.1, 0.9
    for use_exp in (0, 1):
        # when the flag is set, the second operand is exponentiated first
        rhs = np.exp(mat_b) if use_exp else mat_b
        expected = w_a * mat_a + w_b * rhs
        out = mat_a.copy()
        linear_comb2(10, 10, w_a, w_b, out, mat_b, use_exp)
        assert_allclose(out, expected, rtol=RTOL[dtype])
def test_augment_nodes():
    """Cython augment_nodes matches the numpy reference computation.

    Interior rows get ``eta*(p @ Loss) + eta*unary - nu``; the first and
    last rows have the ``nu`` term added back (so it cancels there).
    """
    rng = np.random.RandomState(0)
    uscores = rng.random_sample((10, 10)).astype(dtype, copy=False)
    p = rng.random_sample((10, 10)).astype(dtype, copy=False)
    Loss = rng.random_sample((10, 10)).astype(dtype, copy=False)
    unary_potentials = rng.random_sample((10, 10)).astype(dtype, copy=False)
    nu_nodes = rng.random_sample((10, 10)).astype(dtype, copy=False)
    eta = 0.1
    # reference value, built before the in-place call
    expected = eta * np.dot(p, Loss) + eta * unary_potentials - nu_nodes
    expected[0] += nu_nodes[0]
    expected[-1] += nu_nodes[-1]
    augment_nodes(uscores, p, Loss, unary_potentials, nu_nodes, eta, 10, 10)
    assert_allclose(uscores, expected, rtol=RTOL[dtype])
def test_gemm(dtype, opA, transA, opB, transB, order):
    """Cython gemm computes ``C := alpha*op(A)@op(B) + beta*C`` in place."""
    gemm = _gemm_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    mat_a = np.asarray(
        opA(rng.random_sample((30, 10)).astype(dtype, copy=False)),
        order=ORDER[order])
    mat_b = np.asarray(
        opB(rng.random_sample((10, 20)).astype(dtype, copy=False)),
        order=ORDER[order])
    mat_c = np.asarray(rng.random_sample((30, 20)).astype(dtype, copy=False),
                       order=ORDER[order])
    alpha, beta = 2.5, -0.5
    ref = alpha * opA(mat_a).dot(opB(mat_b)) + beta * mat_c  # before overwrite
    gemm(transA, transB, alpha, mat_a, mat_b, beta, mat_c)
    assert_allclose(mat_c, ref, rtol=RTOL[dtype])
def test_rot(dtype):
    """Cython rot applies a Givens rotation to both vectors in place."""
    rot = _rot_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    x_vec = rng.random_sample(10).astype(dtype, copy=False)
    y_vec = rng.random_sample(10).astype(dtype, copy=False)
    cos_v = dtype(rng.randn())
    sin_v = dtype(rng.randn())
    # snapshots of the rotated vectors, before the in-place call
    ref_x = cos_v * x_vec + sin_v * y_vec
    ref_y = cos_v * y_vec - sin_v * x_vec
    rot(x_vec, y_vec, cos_v, sin_v)
    assert_allclose(x_vec, ref_x)
    assert_allclose(y_vec, ref_y)
def test_rotg(dtype):
    """Cython rotg matches a pure-python BLAS ``?rotg`` reference."""
    rotg = _rotg_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    a = dtype(rng.randn())
    b = dtype(rng.randn())
    c, s = 0.0, 0.0

    def reference(a, b):
        # pure-python transcription of the BLAS rotg recipe
        roe = a if abs(a) > abs(b) else b
        if a == 0 and b == 0:
            return 0, 0, 1, 0  # r, z, c, s for the degenerate input
        r = np.sqrt(a ** 2 + b ** 2)
        if roe < 0:
            r = -r
        c, s = a / r, b / r
        if roe == a:
            z = s
        elif c != 0:
            z = 1 / c
        else:
            z = 1
        return r, z, c, s

    assert_allclose(rotg(a, b, c, s), reference(a, b), rtol=RTOL[dtype])
def test_SPMP():
    """Cython SPMP on a sequence model must match the python reference.

    Both solvers start from identical uniform initializations on a 0/1 loss;
    every returned iterate and the duality-gap trace must agree.
    """
    length = 5
    n_states = 5
    Loss = np.ones((n_states, n_states))
    np.fill_diagonal(Loss, 0.0)
    unary_potentials = np.random.random_sample((length, n_states))
    pairwise_potentials = np.random.random_sample((n_states, n_states))
    # uniform initial distributions over nodes and edges
    p = np.ones((length, n_states)) / n_states
    nu_nodes = np.ones((length, n_states)) / n_states
    nu_edges = np.ones((length - 1, n_states, n_states)) / (n_states ** 2)
    max_iter = 100
    eta = 1 / (2 * np.max(Loss))

    start = time.time()
    out1_p, out2_p, dg_p = maxmin_spmp_sequence_p2(
        nu_nodes, nu_edges, p, unary_potentials, pairwise_potentials, Loss,
        max_iter, eta, sum_product_cython=True)
    slow = time.time() - start
    mun_p, mue_p, q_p = out1_p[0][0], out1_p[0][1], out1_p[1]
    nun_p, nue_p, p_p = out2_p[0][0], out2_p[0][1], out2_p[1]

    start = time.time()
    out1_c, out2_c, dg_c = maxmin_spmp_sequence_c2(
        nu_nodes, nu_edges, p, unary_potentials, pairwise_potentials, Loss,
        max_iter, eta)
    fast = time.time() - start
    mun_c, mue_c = np.array(out1_c[0][0]), np.array(out1_c[0][1])
    nun_c, nue_c = np.array(out2_c[0][0]), np.array(out2_c[0][1])
    q_c = np.array(out1_c[1])
    p_c = np.array(out2_c[1])
    dg_c = np.array(dg_c)

    # BUG FIX: rtol=7 (700% relative tolerance) made every comparison
    # vacuous; the unused `edges` array and commented-out alternatives
    # were also removed.
    assert_allclose(mun_c, mun_p, rtol=1e-7)
    assert_allclose(mue_c, mue_p, rtol=1e-7)
    assert_allclose(q_c, q_p, rtol=1e-7)
    assert_allclose(nun_c, nun_p, rtol=1e-7)
    assert_allclose(nue_c, nue_p, rtol=1e-7)
    assert_allclose(p_c, p_p, rtol=1e-7)
    assert_allclose(dg_c, dg_p, rtol=1e-7)
def test_min_memview():
    """Cython min_c returns the smallest element of a vector."""
    rng = np.random.RandomState(0)
    vec = rng.random_sample(10).astype(dtype, copy=False)
    assert_allclose(min_c(vec), np.min(vec), rtol=RTOL[dtype])
def test_logsumexp():
    """Cython logsumexp with an explicit max matches the shifted formula."""
    rng = np.random.RandomState(0)
    vec = rng.random_sample(10).astype(dtype, copy=False)
    peak = vec.max()
    # numerically-stable reference: shift by the max, then shift back
    reference = peak + np.log(np.exp(vec - peak).sum())
    assert_allclose(logsumexp(vec, peak), reference, rtol=RTOL[dtype])