Example #1
def test_merge_obs_r_values():
    a1 = pe.pseudo_Obs(1.1, .1, 'a|1')
    a2 = pe.pseudo_Obs(1.2, .1, 'a|2')
    a = pe.merge_obs([a1, a2])

    assert np.isclose(a.r_values['a|1'], a1.value)
    assert np.isclose(a.r_values['a|2'], a2.value)
    assert np.isclose(a.value, np.mean([a1.value, a2.value]))
Example #2
def standardToNat( cls, pi ):
    # Natural parameter is log( pi ); send entries of pi that are
    # (close to) zero to -inf instead of warning on log( 0 ).
    zero_mask = np.isclose( pi, 0.0 )
    if( np.any( zero_mask ) ):
        n = np.empty_like( pi )
        n[ ~zero_mask ] = np.log( pi[ ~zero_mask ] )
        n[ zero_mask ] = -np.inf
    else:
        n = np.log( pi )
    return ( n, )
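A minimal sketch exercising the zero-handling branch above, assuming the method body is lifted into a standalone function (the original is a method on a distribution class):

import numpy as np

def standard_to_nat(pi):  # hypothetical free-function version, for illustration
    zero_mask = np.isclose(pi, 0.0)
    n = np.empty_like(pi)
    n[~zero_mask] = np.log(pi[~zero_mask])
    n[zero_mask] = -np.inf
    return (n,)

(n,) = standard_to_nat(np.array([0.5, 0.5, 0.0]))
print(n)  # [-0.693... -0.693... -inf] with no log(0) warning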
Example #3
    def calc_B(self, k):
        current_left_scaled = self.calc_u_l_grad(self.params['x_J'])
        current_right_scaled = self.calc_u_r_grad(self.params['x_J'])

        if np.isclose(current_left_scaled, 0.0) and np.isclose(
                current_right_scaled, 0.0):
            B = -1.0
        else:
            B = current_left_scaled / current_right_scaled

        return B
Example #4
def test_gradient(X, beta, gamma):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np_grad_beta = grad(jastrow_np, 1)(X, beta, gamma) / jastrow_np(X, beta, gamma)
        np_grad_gamma = grad(jastrow_np, 2)(X, beta, gamma) / jastrow_np(X, beta, gamma)

    psi = JastrowOrion(beta, gamma)
    actual = psi.gradient(X)
    assert 2 == len(actual)

    if math.isfinite(np_grad_beta):
        assert np.isclose(np_grad_beta, actual[0])
    if math.isfinite(np_grad_gamma):
        assert np.isclose(np_grad_gamma, actual[1])
Example #5
def distint_locs(xy):
    Dist = dist_mat(xy, xy)
    d_list = [Dist[i] for i in range(len(Dist))]
    D_ = sorted(d_list, key=row_comp)
    Duniq = []
    ids_ = []
    for i in range(len(D_) - 1):
        di = 1.0 * (D_[i] < TAU)
        di1 = 1.0 * (D_[i + 1] < TAU)
        if not np.isclose(np.linalg.norm(di - di1), 0):
            Duniq.append(D_[i])
            ids_.append(i)
    Duniq.append(D_[-1])
    ids_.append(len(D_) - 1)

    if plot > 1:
        D = np.stack(Duniq)
        fig, ax = plt.subplots(nrows=1, ncols=1 + len(threshs))
        ax[0].imshow(D)
        for i in range(len(threshs)):
            ax[i + 1].imshow(D < threshs[i])
        plt.show()
        print(xy[:, ids_].shape)
        Dist = dist_mat(xy[:, ids_], xy[:, ids_])
        fig, ax = plt.subplots(nrows=1, ncols=1 + len(threshs))
        ax[0].imshow(Dist)
        for i in range(len(threshs)):
            ax[i + 1].imshow(Dist < threshs[i])
        plt.show()
        plt.scatter(xy[0, ids_], xy[1, ids_], c='r', marker='x')
        plt.show()

    return ids_
Example #6
def testCategoricalHMMWithKnownStates():

    T = 50
    K = 3
    obsDim = 2
    D = 3

    mp = CategoricalHMM()

    initialDist = Dirichlet.generate(D=K)
    transDist = TransitionDirichletPrior.generate(D_in=K, D_out=K)
    emissionDist = TransitionDirichletPrior.generate(D_in=K, D_out=obsDim)

    ys = [Categorical.generate(D=obsDim, size=T) for _ in range(D)]

    start = time.time()
    mp.updateParams(initialDist, transDist, emissionDist, ys)
    end = time.time()
    print('Preprocess: ', end - start)

    kS = int(np.random.random() * T / 10) + 2
    knownStates = np.random.choice(T, kS)
    knownStates = np.vstack(
        (knownStates, np.random.choice(K, knownStates.shape[0]))).reshape(
            (2, -1)).T

    # Sort and remove duplicates
    knownStates = np.array(sorted(knownStates, key=lambda x: x[0]))
    knownStates = knownStates[1:][~(np.diff(knownStates[:, 0]) == 0)]

    # print( knownStates )

    start = time.time()
    alphas = mp.forwardFilter(knownLatentStates=knownStates)
    betas = mp.backwardFilter(knownLatentStates=knownStates)
    end = time.time()
    print('Both filters: ', end - start)

    marginal = np.logaddexp.reduce(alphas[-1])
    for a, b in zip(alphas, betas):
        # print( a + b )
        # comp = np.logaddexp.reduce( a + b )
        comp = mp.log_marginalFromAlphaBeta(a, b)
        assert np.isclose(comp, marginal), comp - marginal

    for t in range(T - 1):
        joint = mp.childParentJoint(t, alphas, betas)

        parentProb = np.logaddexp.reduce(joint, axis=1)
        childProb = np.logaddexp.reduce(joint, axis=0)

        trueParent = alphas[t] + betas[t]
        trueChild = alphas[t + 1] + betas[t + 1]

        assert np.allclose(parentProb, trueParent)
        assert np.allclose(childProb, trueChild)

    print(
        'Passed the categorical forward backward marginal test with known states!\n\n'
    )
Example #7
def test_batches():
    demo = simple_five_pop_demo()

    sampled_n_dict = dict(zip(demo.leafs, [10]*5))
    num_bases=1000
    sfs = demo.simulate_data(
        length=num_bases,
        muts_per_gen=.1/num_bases,
        recoms_per_gen=0,
        num_replicates=1000,
        sampled_n_dict=sampled_n_dict)._sfs


    sfs_len = sfs.n_nonzero_entries

    print("total entries", sfs_len)
    print("total snps", sfs.n_snps())

    assert sfs_len > 30

    demo = demo._get_demo(sampled_n_dict)
    assert np.isclose(SfsLikelihoodSurface(sfs, batch_size=5).log_lik(demo),
                      momi.likelihood._composite_log_likelihood(sfs, demo))

    assert np.allclose(SfsLikelihoodSurface(sfs, batch_size=5).log_lik(demo, vector=True),
                      momi.likelihood._composite_log_likelihood(sfs, demo, vector=True))
Example #8
def test_eval(X, beta):
    psi = JastrowMcMillian(5, beta, L)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np_eval = jastrow_np(X, 5, beta)
        if math.isfinite(np_eval):
            assert np.isclose(np_eval, psi(X), equal_nan=True)
Example #9
    def compare_smoother_grads(lds):
        init_params, pair_params, node_params = lds

        symmetrize = make_unop(lambda x: (x + x.T)/2. if np.ndim(x) == 2 else x, tuple)

        messages, _ = natural_filter_forward_general(*lds)
        dotter = randn_like(natural_smoother_general(messages, *lds))

        def py_fun(messages):
            result = natural_smoother_general(messages, *lds)
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        dense_messages, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        def cy_fun(messages):
            result = _natural_smoother_general(messages, pair_params)
            result = result[0][:3], result[1], result[2]
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        result_py = py_fun(messages)
        result_cy = cy_fun(dense_messages)
        assert np.isclose(result_py, result_cy)

        g_py = grad(py_fun)(messages)
        g_cy = unpack_dense_messages(grad(cy_fun)(dense_messages))

        assert allclose(g_py, g_cy)
Example #10
    def parameterCheck( self, initialDist, transDist, mus, sigmas, ys=None ):

        assert initialDist.shape[ 0 ] == transDist.shape[ 0 ] and transDist.shape[ 0 ] == transDist.shape[ 1 ]
        assert len( mus ) == transDist.shape[ 0 ]
        assert len( sigmas ) == transDist.shape[ 0 ]
        assert np.isclose( 1.0, initialDist.sum() )
        assert np.allclose( np.ones( transDist.shape[ 0 ] ), transDist.sum( axis=1 ) )
Example #11
def test_alternative_solvers():
    dim = 192
    x = np.arange(dim)
    y = 2 * np.exp(-0.06 * x) + np.random.normal(0.0, 0.15, dim)
    yerr = 0.1 + 0.1 * np.random.rand(dim)

    oy = []
    for i, item in enumerate(x):
        oy.append(pe.pseudo_Obs(y[i], yerr[i], 'test'))

    def func(a, x):
        y = a[0] * np.exp(-a[1] * x)
        return y

    chisquare_values = []
    out = pe.least_squares(x, oy, func, method='migrad')
    chisquare_values.append(out.chisquare)
    out = pe.least_squares(x, oy, func, method='Powell')
    chisquare_values.append(out.chisquare)
    out = pe.least_squares(x, oy, func, method='Nelder-Mead')
    chisquare_values.append(out.chisquare)
    out = pe.least_squares(x, oy, func, method='Levenberg-Marquardt')
    chisquare_values.append(out.chisquare)
    chisquare_values = np.array(chisquare_values)
    assert np.all(np.isclose(chisquare_values, chisquare_values[0]))
Example #12
    def reweight(self, norm_order=float('inf'), eps=1e-8):
        """
        Reweights every component to have norm 1,
        and then sorts by the component weights
        """
        components = np.array(self.components)
        components[np.isclose(components, 0, atol=eps)] = 0
        norms = np.linalg.norm(components,
                               ord=norm_order, axis=2)
        all0 = norms == 0
        assert np.all(np.max(np.abs(components),
                             axis=2)[all0] == 0)
        norms[all0] = 1

        components = np.einsum("ijk,ij->ijk",
                               components,
                               1. / norms)
        norms[all0] = 0
        weights = self.weights * np.prod(norms, axis=0)

        sort_components = np.argsort(weights)[::-1]
        weights = weights[sort_components]
        components = components[:, sort_components, :]
        return quartet_decomposition(self.populations,
                                     components,
                                     weights=weights,
                                     fit_info=self.fit_info)
Example #13
def test_parameter_estimation():
    x = np.linspace(-10., 10., 200)

    amplitude = 3.
    x_0 = 4.
    sigma = 2.
    noise = 0.2

    model = Gaussian1D(amplitude, x_0, sigma)
    y = model(x)

    np.random.seed(0)
    y += np.random.normal(0., noise, x.shape)

    # parameter estimation using MiraPy
    init_model = Gaussian1D(1., 1., 1.)
    parest = ParameterEstimation(x, y, init_model, mean_squared_error)
    parest.fit()
    best_model = parest.get_model()

    # parameter estimation using Astropy

    g_init = models.Gaussian1D(amplitude=1., mean=1., stddev=1.)
    pfit = fitting.LevMarLSQFitter()
    new_model = pfit(g_init, x, y)

    assert np.all(np.isclose(best_model(x), new_model(x), atol=0.01))
Example #14
def surgery(Z):
    empties = np.isclose(Z.sum(axis=0), 0)
    Q, R = Z.shape
    if np.any(empties):
        print('!')
    while np.any(empties):
        for r, empty in enumerate(empties):
            if empty:
                # select a nonempty cluster and split it
                c = np.random.choice(np.where(np.logical_not(empties))[0])
                for q in range(Q):
                    if np.random.binomial(1, 0.5):
                        Z[q, r] = Z[q, c]
                        Z[q, c] = 0
        empties = np.isclose(Z.sum(axis=0), 0)
    return Z
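A minimal sketch of calling surgery, assuming numpy is imported as np: start from a hard assignment matrix with a deliberately empty cluster column and check that every column is nonempty afterwards.

import numpy as np

Q, R = 10, 3
Z = np.zeros((Q, R))
Z[np.arange(Q), np.random.choice(2, size=Q)] = 1  # column 2 starts empty
Z = surgery(Z)
assert not np.any(np.isclose(Z.sum(axis=0), 0))   # all clusters populated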
Example #15
    def parameterCheck(self, log_initial_dists, log_transition_dists,
                       log_emission_dists):

        assert len(log_initial_dists.keys()) == len(
            log_transition_dists.keys()) and len(
                log_transition_dists.keys()) == len(log_emission_dists.keys())

        for group in log_initial_dists.keys():
            log_initial_dist = log_initial_dists[group]
            log_transition_dist = log_transition_dists[group]
            log_emission_dist = log_emission_dists[group]

            K = log_initial_dist.shape[0]
            assert log_initial_dist.ndim == 1
            assert log_initial_dist.shape == (K, )
            for _transition_dist in log_transition_dist:
                assert np.allclose(np.ones(_transition_dist.shape[:-1]),
                                   np.exp(_transition_dist).sum(
                                       axis=-1)), _transition_dist.sum(axis=-1)
            assert log_emission_dist.shape[0] == K
            assert np.isclose(1.0, np.exp(log_initial_dist).sum())
            assert np.allclose(np.ones(K),
                               np.exp(log_emission_dist).sum(axis=1))
            pis = set()
            for dist in log_transition_dist:
                ndim = dist.shape
                assert ndim not in pis
                pis.add(ndim)
Example #16
    def cavi(self, ys, maxIters=1000, verbose=False):

        last_elbo = 9999

        for i in verboseRange(maxIters, verbose):

            if (i > 0):
                self.state.prior.mf_nat_params = prior_mf_nat_params

            self.state.mf_nat_params = self.state.iexpectedNatParams(
                use_mean_field=True)
            prior_mf_nat_params, normalizer = self.state.variationalPosteriorPriorNatParams(
                ys=ys,
                nat_params=self.state.mf_nat_params,
                prior_nat_params=self.state.prior.nat_params,
                return_normalizer=True)

            # The ELBO computation is only valid right after the variational E step
            elbo = self.state.ELBO(
                normalizer=normalizer,
                prior_mf_nat_params=self.state.prior.mf_nat_params,
                prior_nat_params=self.state.prior.nat_params)

            if (np.isclose(last_elbo, elbo)):
                break

            last_elbo = elbo
Example #17
    def parameterCheck( self, initialDist, transDist, emissionDist, ys=None ):
        assert initialDist.shape[ 0 ] == transDist.shape[ 0 ] and transDist.shape[ 0 ] == transDist.shape[ 1 ]
        assert emissionDist.shape[ 0 ] == transDist.shape[ 0 ]

        assert np.isclose( 1.0, initialDist.sum() )
        assert np.allclose( np.ones( transDist.shape[ 0 ] ), transDist.sum( axis=1 ) )
        assert np.allclose( np.ones( transDist.shape[ 0 ] ), emissionDist.sum( axis=1 ) )
Example #18
    def compare_smoother_grads(lds):
        init_params, pair_params, node_params = lds

        symmetrize = make_unop(
            lambda x: (x + x.T) / 2. if np.ndim(x) == 2 else x, tuple)

        messages, _ = natural_filter_forward_general(*lds)
        dotter = randn_like(natural_smoother_general(messages, *lds))

        def py_fun(messages):
            result = natural_smoother_general(messages, *lds)
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        dense_messages, _ = _natural_filter_forward_general(
            init_params, pair_params, node_params)

        def cy_fun(messages):
            result = _natural_smoother_general(messages, pair_params)
            result = result[0][:3], result[1], result[2]
            assert shape(result) == shape(dotter)
            return contract(dotter, result)

        result_py = py_fun(messages)
        result_cy = cy_fun(dense_messages)
        assert np.isclose(result_py, result_cy)

        g_py = grad(py_fun)(messages)
        g_cy = unpack_dense_messages(grad(cy_fun)(dense_messages))

        assert allclose(g_py, g_cy)
Example #19
    def calc_update(self,
                    x,
                    p,
                    trust_radius,
                    trust_radius_max,
                    obj,
                    quality_required=0.2,
                    quality_low=0.25,
                    quality_high=0.75):
        # Parameter checks
        if not quality_required < quality_low < quality_high:
            raise ValueError(
                'Invalid quality parameters, must be: quality_required < quality_low < quality_high'
            )

        df = obj.function(x) - obj.function(x + p)
        dm = self.model(x, np.zeros_like(x), obj) - self.model(x, p, obj)
        quality = df / dm

        if quality < quality_low:
            trust_radius_new = quality_low * trust_radius
        else:
            if quality > quality_high and np.isclose(la.norm(p), trust_radius):
                trust_radius_new = min(2 * trust_radius, trust_radius_max)
            else:
                trust_radius_new = np.copy(trust_radius)

        if quality > quality_required:
            x_new = x + p
        else:
            x_new = np.copy(x)

        return x_new, trust_radius_new
Example #20
def test_covariance_symmetry():
    value1 = np.random.normal(5, 10)
    dvalue1 = np.abs(np.random.normal(0, 1))
    test_obs1 = pe.pseudo_Obs(value1, dvalue1, 't')
    test_obs1.gamma_method()
    value2 = np.random.normal(5, 10)
    dvalue2 = np.abs(np.random.normal(0, 1))
    test_obs2 = pe.pseudo_Obs(value2, dvalue2, 't')
    test_obs2.gamma_method()
    cov_ab = pe.covariance(test_obs1, test_obs2)
    cov_ba = pe.covariance(test_obs2, test_obs1)
    assert np.abs(cov_ab - cov_ba) <= 10 * np.finfo(np.float64).eps
    assert np.abs(cov_ab) < test_obs1.dvalue * test_obs2.dvalue * (
        1 + 10 * np.finfo(np.float64).eps)

    N = 100
    arr = np.random.normal(1, .2, size=N)
    configs = np.ones_like(arr)
    for i in np.random.uniform(0, len(arr), size=int(.8 * N)):
        configs[int(i)] = 0
    zero_arr = [arr[i] for i in range(len(arr)) if configs[i] != 0]
    idx = [i + 1 for i in range(len(configs)) if configs[i] == 1]
    a = pe.Obs([zero_arr], ['t'], idl=[idx])
    a.gamma_method()
    assert np.isclose(a.dvalue**2, pe.covariance(a, a), atol=100, rtol=1e-4)

    cov_ab = pe.covariance(test_obs1, a)
    cov_ba = pe.covariance(a, test_obs1)
    assert np.abs(cov_ab - cov_ba) <= 10 * np.finfo(np.float64).eps
    assert np.abs(cov_ab) < test_obs1.dvalue * test_obs2.dvalue * (
        1 + 10 * np.finfo(np.float64).eps)
Example #21
def expected_sfs_tensor_prod(vecs,
                             demography,
                             mut_rate=1.0,
                             sampled_pops=None):
    """
    Viewing the SFS as a D-tensor (where D is the number of demes), this
    returns a 1d array whose j-th entry is a certain summary statistic of the
    expected SFS, given by the following tensor-vector multiplication:

    res[j] = \sum_{(i0,i1,...)} E[sfs[(i0,i1,...)]] * vecs[0][j,i0] * vecs[1][j, i1] * ...

    where E[sfs[(i0,i1,...)]] is the expected SFS entry for config
    (i0,i1,...), as given by expected_sfs

    Parameters
    ----------
    vecs : sequence of 2-dimensional numpy.ndarray
         length-D sequence, where D = number of demes in the demography.
         vecs[k] is 2-dimensional array, with constant number of rows, and
         with n[k]+1 columns, where n[k] is the number of samples in the
         k-th deme. The row vector vecs[k][j,:] is multiplied against
         the expected SFS along the k-th mode, to obtain res[j].
    demography : Demography
    mut_rate : float
         the rate of mutations per unit time

    Returns
    -------
    res : numpy.ndarray (1-dimensional)
        res[j] is the tensor multiplication of the sfs against the vectors
        vecs[0][j,:], vecs[1][j,:], ... along its tensor modes.

    See Also
    --------
    sfs_tensor_prod : compute the same summary statistics for an observed SFS
    expected_sfs : compute individual SFS entries
    expected_total_branch_len, expected_tmrca, expected_deme_tmrca :
         examples of coalescent statistics that use this function
    """
    # NOTE cannot use vecs[i] = ... due to autograd issues
    sampled_n = [np.array(v).shape[-1] - 1 for v in vecs]
    vecs = [
        np.vstack([
            np.array([1.0] + [0.0] * n),  # all ancestral state
            np.array([0.0] * n + [1.0]),  # all derived state
            v
        ]) for v, n in zip(vecs, demography.sampled_n)
    ]

    res = _expected_sfs_tensor_prod(vecs, demography, mut_rate=mut_rate)

    # subtract out mass for all ancestral/derived state
    for k in (0, 1):
        res = res - res[k] * np.prod([l[:, -k] for l in vecs], axis=0)
        assert np.isclose(res[k], 0.0)
    # remove monomorphic states
    res = res[2:]

    return res
Example #22
from typing import Union

def gamma(t: np.ndarray, P1: Union[tuple, np.ndarray], P2: Union[tuple, np.ndarray],
          u1: Union[tuple, np.ndarray], u2: Union[tuple, np.ndarray]) -> np.ndarray:
    """
    This function generates a polynomial interpolation between two points in a 2D plane.

    :param t: The interpolated curve's normalized parameter
    :param P1: The first point tuple
    :param P2: The second point tuple
    :param u1: The derivative vector at P1
    :param u2: The derivative vector at P2

    :returns: The interpolated points' array
    """

    #   1. Unpacking the variables for easier processing
    u11, u12 = u1
    u21, u22 = u2
    x1, y1 = P1
    x2, y2 = P2

    #   2. Calculating the determinant of the (u1, u2) couple
    denom = (u12 * u21) - (u11 * u22)

    #   3.1. Using the second-degree polynomial interpolation method when possible
    if not np.isclose(denom, 0):
        #   3.1.1. Calculating the values of k1 and k2
        k1 = 2 * (((y2 - y1) * u21) - ((x2 - x1) * u22)) / denom
        k2 = 2 * (((x2 - x1) * u12) - ((y2 - y1) * u11)) / denom

        #   3.1.2. Calculating the explicit values of the a, b, c, d, e, f parameters
        a = x1
        b = k1 * u11
        c = k2 * u21 + x1 - x2
        d = y1
        e = k1 * u12
        f = k2 * u22 + y1 - y2

        #   3.1.3. Calculating the values of the x, y variables
        x = a + b * t + c * t**2
        y = d + e * t + f * t**2

    #   3.2. Switching to a linear interpolation method (ignoring the derivative vectors) if u1 and u2 are parallel vectors
    else:
        #   3.2.1. Calculating the explicit values of the a, b, c, d parameters
        a = x1
        b = x2 - x1
        c = y1
        d = y2 - y1

        #   3.2.2. Calculating the values of the x, y variables
        x = a + b * t
        y = c + d * t

    #   4. Concatenating the results into a single coordinates array
    points_interpolated = np.empty((2, t.shape[0]))
    points_interpolated[0, :] = x
    points_interpolated[1, :] = y

    return points_interpolated
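A minimal sketch of calling gamma, assuming numpy is imported as np: with non-parallel derivative directions the quadratic branch is taken, and the curve hits both endpoints.

import numpy as np

t = np.linspace(0.0, 1.0, 50)
pts = gamma(t, P1=(0.0, 0.0), P2=(1.0, 1.0), u1=(1.0, 0.0), u2=(0.0, 1.0))
print(pts.shape)               # (2, 50)
print(pts[:, 0], pts[:, -1])   # [0. 0.] and [1. 1.]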
Example #23
def testGaussianHMM():

    T = 100
    K = 20
    obsDim = 40
    D = 4

    mp = GaussianHMM()

    initialDist = Dirichlet.generate(D=K)
    transDist = TransitionDirichletPrior.generate(D_in=K, D_out=K)

    mus, sigmas = list(
        zip(*[NormalInverseWishart.generate(D=obsDim) for _ in range(K)]))

    ys = np.random.random((D, T, obsDim))

    start = time.time()
    mp.updateParams(initialDist, transDist, mus, sigmas, ys)
    end = time.time()
    print('Preprocess: ', end - start)

    kS = int(np.random.random() * T / 10) + 2
    knownStates = np.random.choice(T, kS)
    knownStates = np.vstack(
        (knownStates, np.random.choice(K, knownStates.shape[0]))).reshape(
            (2, -1)).T

    # Sort and remove duplicates
    knownStates = np.array(sorted(knownStates, key=lambda x: x[0]))
    knownStates = knownStates[1:][~(np.diff(knownStates[:, 0]) == 0)]

    start = time.time()
    alphas = mp.forwardFilter(knownLatentStates=knownStates)
    betas = mp.backwardFilter(knownLatentStates=knownStates)
    end = time.time()
    print('Both filters: ', end - start)

    marginal = np.logaddexp.reduce(alphas[-1])

    for a, b in zip(alphas, betas):
        # comp = np.logaddexp.reduce( a + b )
        comp = mp.log_marginalFromAlphaBeta(a, b)
        assert np.isclose(comp, marginal), comp - marginal

    for t in range(T - 1):
        joint = mp.childParentJoint(t, alphas, betas)

        parentProb = np.logaddexp.reduce(joint, axis=1)
        childProb = np.logaddexp.reduce(joint, axis=0)

        trueParent = alphas[t] + betas[t]
        trueChild = alphas[t + 1] + betas[t + 1]

        assert np.allclose(parentProb, trueParent)
        assert np.allclose(childProb, trueChild)

    print('Passed the gaussian forward backward marginal test!\n\n')
Example #24
def comp_list_to_lnpdf(comp_list, normalized=True):
    means, covars, icovs, chols, lndets, pis = comp_list_to_matrices(comp_list)
    if not normalized:
        pis /= np.sum(pis)
    assert np.isclose(np.sum(pis), 1.), "pis need to be normalized"

    lnpdf = lambda z: mog.mog_logprob(z, means, icovs, lndets, pis)
    sample = lambda n: mog.mog_samples(n, means, chols, pis)
    return lnpdf, sample
Example #25
def test_drift_force(X, beta):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np_drift = 2 * grad(jastrow_np, 0)(X, 0.5, beta) / jastrow_np(X, 0.5, beta)

    psi = JastrowPade(0.5, beta)
    for expect, actual in zip(np_drift.ravel(), psi.drift_force(X)):
        if math.isfinite(expect):
            assert np.isclose(expect, actual)
Example #26
    def compare_E_step(lds, data):
        natparam = init_params, pair_params, node_params = lds_standard_to_natparam(*lds)
        general_node_params = get_general_node_params(data, lds)
        C, sigma_obs = lds[-2:]
        sample, E_stats, lognorm = natural_lds_inference(natparam, data)
        sample2, E_stats2, lognorm2 = natural_lds_inference_general(
            (init_params, pair_params), general_node_params)
        sample3, E_stats3, lognorm3 = natural_lds_inference_general_nosaving(
            (init_params, pair_params), general_node_params)
        sample4, E_stats4, lognorm4 = natural_lds_inference_general_autograd(
            (init_params, pair_params), general_node_params)

        assert allclose(E_stats[:-1], E_stats2[:-1])
        assert allclose(E_stats2, E_stats3)
        assert allclose(E_stats2, E_stats4)
        assert np.isclose(lognorm, lognorm2)
        assert np.isclose(lognorm, lognorm3)
        assert np.isclose(lognorm, lognorm4)
Example #27
def calc_boundary_phases(k, params):
    l = params['l']
    L_0 = params['L_0']
    C_0 = params['C_0']
    C_i = params['C_i']
    C_o = params['C_o']
    Z_0 = np.sqrt(L_0 / C_0)
    velocity = 1 / np.sqrt(L_0 * C_0)
    omega = velocity * k
    if not np.isclose(C_i / (2 * l * C_0), 0.0):
        phi_i = np.arctan(1 / np.abs(Z_0 * omega * C_i))
    else:
        phi_i = np.pi / 2
    if not np.isclose(C_o / (2 * l * C_0), 0.0):
        phi_o = np.arctan(1 / np.abs(Z_0 * omega * C_o))
    else:
        phi_o = np.pi / 2
    return phi_i, phi_o
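A minimal sketch of calling calc_boundary_phases with illustrative (made-up) transmission-line values; the dictionary keys match the ones the function reads.

import numpy as np

params = {
    'l': 0.01,       # resonator length
    'L_0': 4e-7,     # inductance per unit length
    'C_0': 1.6e-10,  # capacitance per unit length
    'C_i': 1e-14,    # input coupling capacitance
    'C_o': 0.0,      # open output, so phi_o pins to pi / 2
}
phi_i, phi_o = calc_boundary_phases(k=np.pi / params['l'], params=params)
print(phi_i, phi_o)  # phi_o == pi / 2 because C_o is (close to) zero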
Example #28
def qexp(q):
    norm = agnp.linalg.norm(q[1:4])
    e = agnp.exp(q[0])
    result_w = e * agnp.cos(norm)
    if agnp.isclose(norm, 0):
        result_v = agnp.zeros(3)
    else:
        result_v = e * q[1:4] / norm * agnp.sin(norm)
    return agnp.concatenate((agnp.array([result_w]), result_v))
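A minimal sketch of calling qexp, assuming agnp is autograd.numpy (plain numpy behaves identically here); a zero vector part exercises the isclose branch.

import autograd.numpy as agnp

print(qexp(agnp.zeros(4)))  # [1. 0. 0. 0.], the identity quaternion

q = agnp.array([0.0, agnp.pi / 2, 0.0, 0.0])
print(qexp(q))              # ~[0. 1. 0. 0.], a 180-degree rotation about x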
Example #29
def test_drift_force(X, n, beta):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np_drift = 2 * grad(jastrow_np, 0)(X, n, beta) / jastrow_np(X, n, beta)

    psi = JastrowMcMillian(n, beta, L)
    for expect, actual in zip(np_drift.ravel(), psi.drift_force(X)):
        if math.isfinite(expect):
            assert np.isclose(expect, actual, equal_nan=True)
Example #30
    def get_action_one_step(self, state, t):

        mean = np.dot(self.K[t], state) + self.k[t]
        # todo remove random here
        # Add exploration noise unless the policy std is (close to) zero.
        if not np.isclose(0.0, self.std[t]).all():
            return np.clip(np.random.normal(mean, self.std[t]),
                           self.control_low, self.control_high)
        else:
            return np.clip(mean, self.control_low, self.control_high)
Example #31
def test_gradient(X, alpha, beta):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        np_grad_alpha = grad(jastrow_np, 1)(X, alpha, beta) / jastrow_np(
            X, alpha, beta)
        np_grad_beta = grad(jastrow_np, 2)(X, alpha, beta) / jastrow_np(
            X, alpha, beta)

    psi_alpha_const = JastrowPade(alpha, beta)
    psi = JastrowPade(alpha, beta, False)
    actual = psi.gradient(X)
    assert 2 == len(actual)

    assert psi_alpha_const.gradient(X)[0] == 0

    if math.isfinite(np_grad_beta):
        assert np.isclose(np_grad_alpha, actual[0])
        assert np.isclose(np_grad_beta, actual[1])
Example #32
    def compare_filters(lds, data):
        (filtered_mus, filtered_sigmas), loglike = filter_forward(data, *lds)

        messages, lognorm = natural_filter_forward(lds_standard_to_natparam(*lds), data)
        prediction_messages, filter_messages = uninterleave(messages)
        natural_filtered_mus, natural_filtered_sigmas = zip(*map(natural_to_mean, filter_messages))

        assert all(map(np.allclose, filtered_mus, natural_filtered_mus))
        assert all(map(np.allclose, filtered_sigmas, natural_filtered_sigmas))
        assert np.isclose(loglike, lognorm)
Example #33
    def compare_E_step(lds, data):
        natparam = init_params, pair_params, node_params = lds_standard_to_natparam(
            *lds)
        general_node_params = get_general_node_params(data, lds)
        C, sigma_obs = lds[-2:]
        sample, E_stats, lognorm = natural_lds_inference(natparam, data)
        sample2, E_stats2, lognorm2 = natural_lds_inference_general(
            (init_params, pair_params), general_node_params)
        sample3, E_stats3, lognorm3 = natural_lds_inference_general_nosaving(
            (init_params, pair_params), general_node_params)
        sample4, E_stats4, lognorm4 = natural_lds_inference_general_autograd(
            (init_params, pair_params), general_node_params)

        assert allclose(E_stats[:-1], E_stats2[:-1])
        assert allclose(E_stats2, E_stats3)
        assert allclose(E_stats2, E_stats4)
        assert np.isclose(lognorm, lognorm2)
        assert np.isclose(lognorm, lognorm3)
        assert np.isclose(lognorm, lognorm4)
Example #34
    def compare_filters(lds):
        init_params, pair_params, node_params = lds
        messages1, lognorm1 = natural_filter_forward_general(
            init_params, pair_params, node_params)
        dense_messages2, lognorm2 = _natural_filter_forward_general(
            init_params, pair_params, node_params)
        messages2 = unpack_dense_messages(dense_messages2)

        assert allclose(messages1, messages2)
        assert np.isclose(lognorm1, lognorm2)
Example #35
def test_natural_predict():
    npr.seed(0)
    n = 3

    J = rand_psd(n)
    h = npr.randn(n)
    bigJ = rand_psd(2*n)
    J11, J12, J22 = bigJ[:n,:n], bigJ[:n,n:], bigJ[n:,n:]
    logZ = npr.randn()
    J, J11, J12, J22 = -1./2*J, -1./2*J11, -J12, -1./2*J22

    (J_pred_1, h_pred_1), lognorm1 = _natural_predict(J, h, J11, J12, J22, logZ)
    (J_pred_2, h_pred_2), lognorm2 = __natural_predict(J, h, J11, J12, J22, logZ)

    assert np.allclose(J_pred_1, J_pred_2)
    assert np.allclose(h_pred_1, h_pred_2)
    assert np.isclose(lognorm1, lognorm2)
Example #36
def allclose(m1, m2):
    if isinstance(m1, np.ndarray):
        return np.allclose(m1, m2)
    elif np.isscalar(m1):
        return np.isclose(m1, m2)
    return len(m1) == len(m2) and all(map(allclose, m1, m2))
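A minimal sketch of the recursive helper above, assuming numpy is imported as np: it walks tuples and lists elementwise, dispatching to np.allclose for arrays and np.isclose for scalars.

import numpy as np

a = (np.ones(3), 2.0, [np.zeros((2, 2)), 5])
b = (np.ones(3), 2.0 + 1e-12, [np.zeros((2, 2)), 5.0])
print(allclose(a, b))  # True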
Example #37
def compare_lognorms(hmm):
    py_logZ = python_hmm_logZ(hmm)
    cy_logZ = cython_hmm_logZ(hmm)
    cy_logZ2 = cython_hmm_logZ_normalized(hmm)[0]
    assert np.isclose(py_logZ, cy_logZ)
    assert np.isclose(py_logZ, cy_logZ2)