Example #1
def test_psd_matlab():

    """ Test the results of mlab csd/psd against saved results from Matlab"""

    from matplotlib import mlab

    test_dir_path = os.path.join(nitime.__path__[0],'tests')
    
    ts = np.loadtxt(os.path.join(test_dir_path,'tseries12.txt'))
    
    #Complex signal! 
    ts0 = ts[1] + ts[0] * 1j

    NFFT = 256
    Fs = 1.0
    noverlap = NFFT // 2

    fxx, f = mlab.psd(ts0,NFFT=NFFT,Fs=Fs,noverlap=noverlap,
                      scale_by_freq=True)

    fxx_mlab = np.fft.fftshift(fxx).squeeze()

    fxx_matlab = np.loadtxt(os.path.join(test_dir_path,'fxx_matlab.txt'))

    npt.assert_almost_equal(fxx_mlab,fxx_matlab,decimal=5)
Example #2
    def test_emcee_lnpost(self):
        # check ln likelihood is calculated correctly. It should be
        # -0.5 * chi**2.
        result = self.mini.minimize()

        # obtain the numeric values
        # note - in this example all the parameters are varied
        fvars = np.array([par.value for par in result.params.values()])

        # calculate the cost function with scaled values (parameters all have
        # lower and upper bounds).
        scaled_fvars = []
        for par, fvar in zip(result.params.values(), fvars):
            par.value = fvar
            scaled_fvars.append(par.setup_bounds())

        val = self.mini.penalty(np.array(scaled_fvars))

        # calculate the log-likelihood value
        bounds = np.array([(par.min, par.max)
                           for par in result.params.values()])
        val2 = _lnpost(fvars,
                       self.residual,
                       result.params,
                       result.var_names,
                       bounds,
                       userargs=(self.x, self.data))

        assert_almost_equal(-0.5 * val, val2)
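
The comment above relies on the identity that, for Gaussian errors with unit variance, the log-likelihood is -0.5 * chi**2 up to an additive constant. A minimal NumPy/SciPy sketch of that identity (illustrative only; lmfit's _lnpost may additionally handle parameter bounds and priors):

import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
resid = rng.normal(size=50)                  # (data - model) / sigma residuals
chi2 = np.sum(resid ** 2)                    # the least-squares cost ("penalty")

# Gaussian log-likelihood with unit sigma; restoring the dropped constant
# n/2 * log(2*pi) leaves exactly -0.5 * chi**2
lnL = np.sum(stats.norm.logpdf(resid)) + 0.5 * resid.size * np.log(2 * np.pi)
np.testing.assert_almost_equal(lnL, -0.5 * chi2)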
Example #3
    def test_pbcbox(self):
        ref_boxes = [numpy.array([[47.100067138671875, 0.0, 0.0], [0.0, 47.29520797729492, 0.0],
            [23.550033569335938, 23.64760398864746, 31.985841751098633]]),
            numpy.array([[47.08340835571289, 0.0, 0.0], [0.0, 47.30361557006836, 0.0],
                [23.541704177856445, 23.65180778503418, 31.991649627685547]]),
            numpy.array([[47.08655548095703, 0.0, 0.0], [0.0, 47.298133850097656, 0.0],
                [23.543277740478516, 23.649066925048828, 31.99555778503418]]),
            numpy.array([[47.07117462158203, 0.0, 0.0], [0.0, 47.30364227294922, 0.0],
                [23.535587310791016, 23.65182113647461, 31.99390983581543]]),
            numpy.array([[47.079769134521484, 0.0, 0.0], [0.0, 47.30522918701172, 0.0],
                [23.539884567260742, 23.65261459350586, 31.995153427124023]]),
            numpy.array([[47.07292175292969, 0.0, 0.0], [0.0, 47.30583953857422, 0.0],
                [23.536460876464844, 23.65291976928711, 32.00019073486328]]),
            numpy.array([[47.066261291503906, 0.0, 0.0], [0.0, 47.3122444152832, 0.0],
                [23.533130645751953, 23.6561222076416, 31.99447250366211]]),
            numpy.array([[47.06310272216797, 0.0, 0.0], [0.0, 47.31338119506836, 0.0],
                [23.531551361083984, 23.65669059753418, 31.999902725219727]]),
            numpy.array([[47.075565338134766, 0.0, 0.0], [0.0, 47.29836654663086, 0.0],
                [23.537782669067383, 23.64918327331543, 32.0003662109375]]),
            numpy.array([[47.0737419128418, 0.0, 0.0], [0.0, 47.295833587646484, 0.0],
                [23.5368709564209, 23.647916793823242, 32.00419235229492]]),
            numpy.array([[47.06859588623047, 0.0, 0.0], [0.0, 47.29644775390625, 0.0],
                [23.534297943115234, 23.648223876953125, 32.00706100463867]]),
        ]

        for i, frame in enumerate(self.traj):
            assert_almost_equal(frame.box.asarray(), ref_boxes[i], 5)
Example #4
 def setup_method(self, method):
     self.som = MiniSom(5,5,1)
     for w in self.som.weights: # checking weights normalization
         assert_almost_equal(1.0,linalg.norm(w))
     self.som.weights = zeros((5,5)) # fake weights
     self.som.weights[2,3] = 5.0
     self.som.weights[1,1] = 2.0
Example #5
def check_skew_expect(distfn, arg, m, v, s, msg):
    if np.isfinite(s):
        m3e = distfn.expect(lambda x: np.power(x-m, 3), arg)
        npt.assert_almost_equal(m3e, s * np.power(v, 1.5),
                decimal=5, err_msg=msg + ' - skew')
    else:
        npt.assert_(np.isnan(s))
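
This check is just the definition of skewness, s = E[(X - m)**3] / v**1.5, so the third central moment must equal s * v**1.5. A standalone illustration using scipy.stats.expon (mean 1, variance 1, skewness 2):

import numpy as np
from scipy import stats

m, v, s = stats.expon.stats(moments='mvs')
m3 = stats.expon.expect(lambda x: np.power(x - m, 3))
np.testing.assert_almost_equal(m3, s * np.power(v, 1.5), decimal=5)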
Example #6
    def test_lipid_atomids(self):
        atomids = self.traj[0].lipid_atomids
        ref_group_firsts = numpy.array([1, 2, 3, 4, 5], dtype=int)
        ref_group_lasts = numpy.array([129596, 129597, 129598, 129599, 129600], dtype=int)

        assert_almost_equal(atomids[0][:5], ref_group_firsts)
        assert_almost_equal(atomids[-1][-5:], ref_group_lasts)
Example #7
    def test_lipid_atomids(self):
        atomids = self.traj[0].lipid_atomids
        ref_group_firsts = numpy.array([1, 2, 3, 4, 5], dtype=int)
        ref_group_lasts = numpy.array([33620, 33621, 33622, 33623, 33624], dtype=int)

        assert_almost_equal(atomids[0][:5], ref_group_firsts)
        assert_almost_equal(atomids[-1][-5:], ref_group_lasts)
Example #8
def test_mat2euler():
    # Test mat2euler function
    angles = (4 * math.pi) * (np.random.random(3) - 0.5)
    for axes in euler._AXES2TUPLE.keys():
        R0 = euler2mat(axes=axes, *angles)
        R1 = euler2mat(axes=axes, *mat2euler(R0, axes))
        assert_almost_equal(R0, R1)
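
The same round-trip property (angles -> matrix -> angles reproduces the original rotation matrix, even if the recovered angles differ) can be checked with scipy's Rotation class; a small sketch, assuming scipy >= 1.4 and using the intrinsic 'ZYX' convention purely as an example:

import numpy as np
from numpy.testing import assert_almost_equal
from scipy.spatial.transform import Rotation

angles = (4 * np.pi) * (np.random.random(3) - 0.5)
R0 = Rotation.from_euler('ZYX', angles).as_matrix()
R1 = Rotation.from_euler('ZYX', Rotation.from_matrix(R0).as_euler('ZYX')).as_matrix()
assert_almost_equal(R0, R1)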
Example #9
    def test_lipid_atomids(self):
        atomids = self.traj[0].lipid_atomids
        ref_group_firsts = numpy.array([2224, 2225, 2226, 2227, 2228], dtype=int)
        ref_group_lasts = numpy.array([26219, 26220, 26221, 26222, 26223], dtype=int)

        assert_almost_equal(atomids[0][:5], ref_group_firsts)
        assert_almost_equal(atomids[-1][-5:], ref_group_lasts)
Example #10
def test_arburg_imag_output():
    a, b, c = arburg(marple_data, 15)


    a_e, b_e, c_e = (numpy.array([ 2.70936368 -0.77610302j,  5.17482864 -2.73293024j,
        7.03527787 -6.15070038j,  7.89423853-10.20591369j,
        6.84853701-14.07469247j,  4.56915619-16.84486008j,
        1.32687590-18.13284671j, -1.87811360-17.49937286j,
       -4.64976221-15.05888331j, -6.22557823-11.25070227j,
       -6.28367510 -6.93498375j, -4.89652279 -3.24910899j,
       -2.99758653 -0.8736847j , -1.32183647 +0.04527281j,
       -0.35565856 +0.14754881j]),
     0.0054379699760549929,
     numpy.array([-0.18570222-0.87179346j,  0.26402371-0.5190592j ,
        0.07162311-0.46372011j,  0.44463099+0.05080174j,
       -0.02634972-0.14691215j,  0.19255061-0.37032848j,
       -0.25994598-0.55924338j, -0.20237974-0.23641516j,
       -0.40546748-0.40598876j, -0.47824854-0.42553068j,
       -0.51507096-0.49435948j, -0.32530245-0.49134098j,
       -0.21950049-0.37261937j, -0.28613904-0.0921211j ,
       -0.35565856+0.14754881j]))


    assert_array_almost_equal(a, a_e)
    assert_almost_equal(b,  b_e)
    assert_array_almost_equal(c, c_e)
Example #11
    def test_spectrum_section(self):
        assert_almost_equal(self.config['spectrum']['start'],
                            parse_quantity(self.yaml_data['spectrum']['start']))
        assert_almost_equal(self.config['spectrum']['end'],
                            parse_quantity(self.yaml_data['spectrum']['stop']))

        assert self.config['spectrum']['bins'] == self.yaml_data['spectrum']['num']
Example #12
 def test_vecself(self):
     """Ticket 844."""
     # Inner product of a vector with itself used to segfault or give a
     # meaningless result
     a = zeros(shape = (1, 80), dtype = float64)
     p = inner_(a, a)
     assert_almost_equal(p, 0, decimal = DECPREC)
Example #13
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    """
    Test that dense liblinear honours intercept_scaling param
    """
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='l2',
                     dual=False, C=1, tol=1e-7)
    assert clf.intercept_scaling == 1, clf.intercept_scaling
    assert clf.fit_intercept

    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)

    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert intercept1 < -1

    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)
Example #14
    def test_smallkernel_vs_makekernel(self, shape, width):
        """
        Test smoothing of an image with a single positive pixel

        Compares a small kernel to something produced by makekernel
        """

        kernel1 = np.ones([width, width]) / float(width) ** 2
        kernel2 = make_kernel(shape, width, kerneltype='boxcar')

        x = np.zeros(shape)
        xslice = tuple(slice(sh // 2, sh // 2 + 1) for sh in shape)
        x[xslice] = 1.0

        c2 = convolve_fft(x, kernel2, boundary='fill')
        c1 = convolve_fft(x, kernel1, boundary='fill')

        print(shape, width)
        assert_almost_equal(c1, c2, decimal=12)

        if width % 2 == 1:
            kernel2 = make_kernel(shape, width, kerneltype='boxcar', force_odd=True)

            c2 = convolve(x, kernel2, boundary='fill')
            c1 = convolve(x, kernel1, boundary='fill')

            print(shape, width)

            assert_almost_equal(c1, c2, decimal=12)
Example #15
    def test_background_subtract_line(self):
        # checked each step of the background subtraction with IGOR
        # so this test background correction should be correct.

        # create some test data
        xvals = np.linspace(-10, 10, 201)
        yvals = np.ceil(gauss(xvals, 0, 100, 0, 1) + 2 * xvals + 30)

        # add some reproducible random noise
        np.random.seed(1)
        yvals += np.sqrt(yvals) * np.random.randn(yvals.size)
        yvals_sd = np.sqrt(yvals)

        mask = np.zeros(201, bool)
        mask[30:70] = True
        mask[130:160] = True

        profile, profile_sd = plp.background_subtract_line(yvals,
                                                           yvals_sd,
                                                           mask)

        verified_data = np.load(os.path.join(self.path,
                                             'background_subtract.npy'))

        assert_almost_equal(verified_data, np.c_[profile, profile_sd])
Example #16
 def test_06(self):
     # Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
     # The exact step response is 1 - (1 + t)*exp(-t).
     system = ([1.0], [1.0, 2.0, 1.0])
     tout, y = step2(system, atol=1e-10, rtol=1e-8)
     expected_y = 1 - (1 + tout) * np.exp(-tout)
     assert_almost_equal(y, expected_y)
Example #17
def test_SparseCoherenceAnalyzer():
    Fs = np.pi
    t = np.arange(256)
    x = np.sin(10 * t) + np.random.rand(t.shape[-1])
    y = np.sin(10 * t) + np.random.rand(t.shape[-1])
    T = ts.TimeSeries(np.vstack([x, y]), sampling_rate=Fs)
    C1 = nta.SparseCoherenceAnalyzer(T, ij=((0, 1), (1, 0)))
    C2 = nta.CoherenceAnalyzer(T)

    # Coherence symmetry:
    npt.assert_equal(np.abs(C1.coherence[0, 1]), np.abs(C1.coherence[1, 0]))
    npt.assert_equal(np.abs(C1.coherency[0, 1]), np.abs(C1.coherency[1, 0]))

    # Make sure you get the same answers as you would from the standard
    # CoherenceAnalyzer:

    npt.assert_almost_equal(C2.coherence[0, 1], C1.coherence[0, 1])
    # This is the PSD (for the first time-series in the object):
    npt.assert_almost_equal(C2.spectrum[0, 0], C1.spectrum[0])
    # And the second (for good measure):
    npt.assert_almost_equal(C2.spectrum[1, 1], C1.spectrum[1])

    # The relative phases should be equal
    npt.assert_almost_equal(C2.phase[0, 1], C1.relative_phases[0, 1])
    # But not the absolute phases (which have the same shape):
    npt.assert_equal(C1.phases[0].shape, C1.relative_phases[0, 1].shape)

    # The delay is equal:
    npt.assert_almost_equal(C2.delay[0, 1], C1.delay[0, 1])
    # Make sure that you would get an error if you provided a method other than
    # 'welch':
    npt.assert_raises(ValueError, nta.SparseCoherenceAnalyzer, T,
                                                    method=dict(this_method='foo'))
Example #18
 def test_01(self):
     # First order system: x'(t) + x(t) = u(t)
     # Exact step response is x(t) = 1 - exp(-t).
     system = ([1.0],[1.0,1.0])
     tout, y = step2(system)
     expected_y = 1.0 - np.exp(-tout)
     assert_almost_equal(y, expected_y)
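
The same analytic check also works with the non-ODE-based scipy.signal.step; a minimal sketch (H(s) = 1/(s + 1) has step response 1 - exp(-t)):

import numpy as np
from numpy.testing import assert_almost_equal
from scipy import signal

system = ([1.0], [1.0, 1.0])          # H(s) = 1 / (s + 1)
tout, y = signal.step(system)
assert_almost_equal(y, 1.0 - np.exp(-tout))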
Example #19
 def test_05(self):
     # Simple integrator: x'(t) = u(t)
     # Exact step response is x(t) = t.
     system = ([1.0],[1.0,0.0])
     tout, y = step2(system, atol=1e-10, rtol=1e-8)
     expected_y = tout
     assert_almost_equal(y, expected_y)
Example #20
 def test_01(self):
     # First order system: x'(t) + x(t) = u(t)
     # Exact impulse response is x(t) = exp(-t).
     system = ([1.0],[1.0,1.0])
     tout, y = impulse2(system)
     expected_y = np.exp(-tout)
     assert_almost_equal(y, expected_y)
Example #21
 def test_06(self):
     # Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
     # The exact impulse response is t*exp(-t).
     system = ([1.0], [1.0, 2.0, 1.0])
     tout, y = impulse2(system)
     expected_y = tout * np.exp(-tout)
     assert_almost_equal(y, expected_y)
def check_pmf_cdf(distfn, arg, msg):
    startind = int(distfn._ppf(0.01, *arg) - 1)
    index = list(range(startind,startind+10))
    cdfs = distfn.cdf(index,*arg)
    npt.assert_almost_equal(cdfs, distfn.pmf(index, *arg).cumsum() +
                            cdfs[0] - distfn.pmf(index[0],*arg),
                            decimal=4, err_msg=msg + 'pmf-cdf')
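
The identity being exercised is cdf(k_i) = cdf(k_0) + sum of pmf over (k_0, k_i] for consecutive integer support points. A self-contained version of the same check with scipy.stats.poisson:

import numpy as np
from scipy import stats

mu = 3.5
k = np.arange(2, 12)
cdfs = stats.poisson.cdf(k, mu)
np.testing.assert_almost_equal(cdfs,
                               stats.poisson.pmf(k, mu).cumsum() +
                               cdfs[0] - stats.poisson.pmf(k[0], mu),
                               decimal=10)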
 def test_cell_magic_number_complex(self):
     # A complex number
     self.ip.run_cell("x = 3.34+4.56j")
     self.ip.run_cell_magic('matlab', '-i x -o y', 'y = x*(11.35 - 23.098j)')
     self.ip.run_cell("res = x*(11.35 - 23.098j)")
     npt.assert_almost_equal(self.ip.user_ns['y'],
                             self.ip.user_ns['res'], decimal=7)
Example #24
def test_t_contrast_add():
    mulm, n, p, q = ols_glm()
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = mulm.contrast(c1) + mulm.contrast(c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #25
def test_energy():
    # make sure that energy as computed by ssvm is the same as by lp
    np.random.seed(0)
    for inference_method in get_installed(["lp", "ad3"]):
        found_fractional = False
        crf = DirectionalGridCRF(n_states=3, n_features=3,
                                 inference_method=inference_method)
        while not found_fractional:
            x = np.random.normal(size=(7, 8, 3))
            unary_params = np.random.normal(size=(3, 3))
            pw1 = np.random.normal(size=(3, 3))
            pw2 = np.random.normal(size=(3, 3))
            w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
            res, energy = crf.inference(x, w, relaxed=True, return_energy=True)
            found_fractional = np.any(np.max(res[0], axis=-1) != 1)

            joint_feature = crf.joint_feature(x, res)
            energy_svm = np.dot(joint_feature, w)

            assert_almost_equal(energy, -energy_svm)
            if not found_fractional:
                # exact discrete labels, test non-relaxed version
                res, energy = crf.inference(x, w, relaxed=False,
                                            return_energy=True)
                joint_feature = crf.joint_feature(x, res)
                energy_svm = np.dot(joint_feature, w)

                assert_almost_equal(energy, -energy_svm)
Example #26
def test_glm_ols():
    mulm, n, p, q = ols_glm()
    assert_array_equal(mulm.labels_, np.zeros(n))
    assert_equal(mulm.results_.keys(), [0.0])
    assert_equal(mulm.results_[0.0].theta.shape, (q, n))
    assert_almost_equal(mulm.results_[0.0].theta.mean(), 0, 1)
    assert_almost_equal(mulm.results_[0.0].theta.var(), 1. / p, 1)
Example #27
def test_Fcontrast_1d_old():
    mulm, n, p, q = ols_glm()
    cval = np.hstack((1, np.ones(9)))
    con = mulm.contrast(cval, contrast_type='F')
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
 def test_matrix(self):
     self.ip.run_cell("in_array = np.array([[1,2,3], [4,5,6]])")
     self.ip.run_cell_magic('matlab', '-i in_array -o out_array',
                            'out_array = in_array;')
     npt.assert_almost_equal(self.ip.user_ns['out_array'],
                             self.ip.user_ns['in_array'],
                             decimal=7)
Example #29
 def test_exponential(self):
     degree = 5
     p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
     for i in range(degree+1):
         assert_almost_equal(p(0),1)
         p = p.deriv()
     assert_almost_equal(p(0),0)
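
The loop works because every derivative of the Taylor polynomial of exp about 0 is again a truncated exponential series, so each evaluates to 1 at 0 until the terms run out. A scipy-free sketch of the same check, building the coefficients 1/k! explicitly with numpy.polynomial:

import numpy as np
from math import factorial
from numpy.testing import assert_almost_equal

degree = 5
p = np.polynomial.Polynomial([1.0 / factorial(k) for k in range(degree + 1)])
for _ in range(degree + 1):
    assert_almost_equal(p(0.0), 1.0)
    p = p.deriv()
assert_almost_equal(p(0.0), 0.0)    # every term has been differentiated away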
def test_kernel_error():
    """

    """
    for xx2d in [np.array(np.meshgrid(np.arange(-100, 100, 5),
                                      np.arange(-100, 100, 5))),

                 np.array(np.meshgrid(np.arange(-100, 100, 5),
                                      np.arange(-100, 100, 5))).reshape(2, -1)]:

        mean1 = [20, 20]
        sigma1 = [10, 10]
        params1 = np.hstack([mean1, sigma1])
        mean2 = [30, 40]
        sigma2 = [10, 50]
        params2 = np.hstack([mean2, sigma2])

        params = [params1, params2]
        betas = [0.3, 0.7]
        
        y = ebp.mixture_of_kernels(xx2d, betas, params,
                                   ebp.gaussian_kernel)

        err = ebp.kernel_err(y, xx2d, betas, params, ebp.gaussian_kernel)

        npt.assert_almost_equal(err, np.zeros(err.shape))
Example #31
def _test_ridge_loo(filter_):
    # test that can work with both dense or sparse matrices
    n_samples = X_diabetes.shape[0]

    ret = []

    ridge_gcv = _RidgeGCV(fit_intercept=False)
    ridge = Ridge(alpha=1.0, fit_intercept=False)

    # generalized cross-validation (efficient leave-one-out)
    decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
    errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
    values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)

    # brute-force leave-one-out: remove one example at a time
    errors2 = []
    values2 = []
    for i in range(n_samples):
        sel = np.arange(n_samples) != i
        X_new = X_diabetes[sel]
        y_new = y_diabetes[sel]
        ridge.fit(X_new, y_new)
        value = ridge.predict([X_diabetes[i]])[0]
        error = (y_diabetes[i] - value) ** 2
        errors2.append(error)
        values2.append(value)

    # check that efficient and brute-force LOO give same results
    assert_almost_equal(errors, errors2)
    assert_almost_equal(values, values2)

    # generalized cross-validation (efficient leave-one-out,
    # SVD variation)
    decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
    errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
    values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)

    # check that efficient and SVD efficient LOO give same results
    assert_almost_equal(errors, errors3)
    assert_almost_equal(values, values3)

    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    alpha_ = ridge_gcv.alpha_
    ret.append(alpha_)

    # check that we get same best alpha with custom loss_func
    ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error)
    ridge_gcv2.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv2.alpha_, alpha_)

    # check that we get same best alpha with custom score_func
    func = lambda x, y: -mean_squared_error(x, y)
    ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func)
    ridge_gcv3.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv3.alpha_, alpha_)

    # check that we get same best alpha with sample weights
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                  sample_weight=np.ones(n_samples))
    assert_equal(ridge_gcv.alpha_, alpha_)

    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T

    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))

    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=5)

    return ret
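
The efficient leave-one-out path tested above rests on the standard identity for linear smoothers: with H = X @ inv(X.T @ X + alpha * I) @ X.T, the leave-one-out residual is (y_i - yhat_i) / (1 - H_ii), so no refitting is needed. A plain-NumPy sketch of that identity for ridge without an intercept (the general idea only, not sklearn's internal _RidgeGCV code):

import numpy as np
from numpy.testing import assert_almost_equal

rng = np.random.default_rng(0)
n, p, alpha = 30, 5, 1.0
X = rng.normal(size=(n, p))
y = X @ rng.normal(size=p) + 0.1 * rng.normal(size=n)

# closed form: hat matrix of ridge, LOO error = ((y - H y) / (1 - diag(H)))**2
H = X @ np.linalg.solve(X.T @ X + alpha * np.eye(p), X.T)
errors_fast = ((y - H @ y) / (1.0 - np.diag(H))) ** 2

# brute force: drop one sample, refit, predict it
errors_brute = []
for i in range(n):
    keep = np.arange(n) != i
    w = np.linalg.solve(X[keep].T @ X[keep] + alpha * np.eye(p),
                        X[keep].T @ y[keep])
    errors_brute.append((y[i] - X[i] @ w) ** 2)

assert_almost_equal(errors_fast, np.array(errors_brute))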
    def test_nmf_topic1(self):
        actual_topic_1_score = self.__nmf.components_[0][3302]
        expected_topic_1_score = 0.2044937886411859

        assert_almost_equal(actual_topic_1_score, expected_topic_1_score, decimal=3)
Example #33
def test_ellipticity_h2o_nuclei():
    # test against multiwfn 3.6 dev src
    with path('chemtools.data', 'data_multiwfn36_fchk_h2o_q+0_ub3lyp_ccpvtz.npz') as fname:
        data = np.load(str(fname))
    result = EigenValueTool(data['nuc_hess_eigval']).ellipticity
    assert_almost_equal(result, data['nuc_ellipticity'], decimal=5)
Example #34
def test_eccentricity():
    eigenvalues = np.array([[-10., -20., 30.], [20., 10., 20.],
                            [-10., -2., -5.], [-10., 0., 1.]])
    result = EigenValueTool(eigenvalues).eccentricity
    assert_almost_equal(result, [np.nan, 2.**0.5, (-2. / -10.)**0.5, np.nan])
Example #35
def test_ellipticity():
    eigenvalues = np.array([[10., 20., 30.], [10., 10., 20.],
                            [-10., -2., -5.], [-10., 0., 1.],
                            [30., 5., 20.]])
    result = EigenValueTool(eigenvalues).ellipticity
    assert_almost_equal(result, [-0.5, 0.0, 1.0, -np.inf, -0.75], decimal=6)
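
For orientation: the reference values here (and in the eccentricity test above) are consistent with sorting each row of eigenvalues in ascending order and computing lambda_1 / lambda_2 - 1 (respectively sqrt(lambda_max / lambda_min)). That reading is inferred from the test data, not from chemtools documentation, so treat this sketch as illustrative only:

import numpy as np
from numpy.testing import assert_almost_equal

eigenvalues = np.array([[10., 20., 30.], [10., 10., 20.],
                        [-10., -2., -5.], [-10., 0., 1.],
                        [30., 5., 20.]])
eigs = np.sort(eigenvalues, axis=1)              # ascending per row
with np.errstate(divide='ignore'):
    ellipticity = eigs[:, 0] / eigs[:, 1] - 1.0
assert_almost_equal(ellipticity, [-0.5, 0.0, 1.0, -np.inf, -0.75], decimal=6)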
Example #36
def test_recursive_response_calibration():
    """
    Test the recursive response calibration method.
    """
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_fnames('small_64D')

    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    sphere = default_sphere

    gtab = gradient_table(bvals, bvecs)
    evals = np.array([0.0015, 0.0003, 0.0003])
    evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (90, 0)]

    where_dwi = lazy_index(~gtab.b0s_mask)

    S_cross, _ = multi_tensor(gtab,
                              mevals,
                              S0,
                              angles=angles,
                              fractions=[50, 50],
                              snr=SNR)

    S_single = single_tensor(gtab, S0, evals, evecs, snr=SNR)

    data = np.concatenate((np.tile(S_cross, (8, 1)), np.tile(S_single,
                                                             (2, 1))),
                          axis=0)

    odf_gt_cross = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])

    odf_gt_single = single_tensor_odf(sphere.vertices, evals, evecs)

    response = recursive_response(gtab,
                                  data,
                                  mask=None,
                                  sh_order=8,
                                  peak_thr=0.01,
                                  init_fa=0.05,
                                  init_trace=0.0021,
                                  iter=8,
                                  convergence=0.001,
                                  parallel=False)

    csd = ConstrainedSphericalDeconvModel(gtab, response)

    csd_fit = csd.fit(data)

    assert_equal(np.all(csd_fit.shm_coeff[:, 0] >= 0), True)

    fodf = csd_fit.odf(sphere)

    directions_gt_single, _, _ = peak_directions(odf_gt_single, sphere)
    directions_gt_cross, _, _ = peak_directions(odf_gt_cross, sphere)
    directions_single, _, _ = peak_directions(fodf[8, :], sphere)
    directions_cross, _, _ = peak_directions(fodf[0, :], sphere)

    ang_sim = angular_similarity(directions_cross, directions_gt_cross)
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions_cross.shape[0], 2)
    assert_equal(directions_gt_cross.shape[0], 2)

    ang_sim = angular_similarity(directions_single, directions_gt_single)
    assert_equal(ang_sim > 0.9, True)
    assert_equal(directions_single.shape[0], 1)
    assert_equal(directions_gt_single.shape[0], 1)

    with warnings.catch_warnings(record=True) as w:
        sphere = Sphere(xyz=gtab.gradients[where_dwi])
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[0].category, UserWarning))
        npt.assert_("Vertices are not on the unit sphere" in str(w[0].message))
    sf = response.on_sphere(sphere)
    S = np.concatenate(([response.S0], sf))

    tenmodel = dti.TensorModel(gtab, min_signal=0.001)

    tenfit = tenmodel.fit(S)
    FA = fractional_anisotropy(tenfit.evals)
    FA_gt = fractional_anisotropy(evals)
    assert_almost_equal(FA, FA_gt, 1)
Example #37
def test_csdeconv():
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_fnames('small_64D')

    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs, b0_threshold=0)
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))

    angles = [(0, 0), (60, 0)]

    S, sticks = multi_tensor(gtab,
                             mevals,
                             S0,
                             angles=angles,
                             fractions=[50, 50],
                             snr=SNR)

    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        _ = ConstrainedSphericalDeconvModel(gtab, response, sh_order=10)
        assert_greater(
            len([lw for lw in w if issubclass(lw.category, UserWarning)]), 0)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(
            len([lw for lw in w if issubclass(lw.category, UserWarning)]), 0)

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    S2 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    big_S = np.zeros((10, 10, 10, len(S2)))
    big_S[:] = S2

    aresponse, aratio = auto_response(gtab,
                                      big_S,
                                      roi_center=(5, 5, 4),
                                      roi_radius=3,
                                      fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
    assert_almost_equal(aresponse[1], 100)
    assert_almost_equal(aratio, response[0][1] / response[0][0])

    auto_response(gtab, big_S, roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])

    _, _, nvoxels = auto_response(gtab,
                                  big_S,
                                  roi_center=(5, 5, 4),
                                  roi_radius=30,
                                  fa_thr=0.5,
                                  return_number_of_voxels=True)
    assert_equal(nvoxels, 1000)
    with warnings.catch_warnings(record=True) as w:
        _, _, nvoxels = auto_response(gtab,
                                      big_S,
                                      roi_center=(5, 5, 4),
                                      roi_radius=30,
                                      fa_thr=1,
                                      return_number_of_voxels=True)
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[0].category, UserWarning))
        npt.assert_(
            "No voxel with a FA higher than 1 were found" in str(w[0].message))

    assert_equal(nvoxels, 0)
    def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                # skip true divide for ints
                if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
                    assert_almost_equal(np.reciprocal(inp2),
                                        np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                np.add(inp1, 2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
                inp2[...] = np.ones_like(inp2)
                np.add(2, inp2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
Example #39
    def test_lkj(self, x, n, p, lp):
        with Model() as model:
            LKJCorr('lkj', n=n, p=p, transform=None)

        pt = {'lkj': x}
        assert_almost_equal(model.fastlogp(pt), lp, decimal=select_by_precision(float64=6, float32=4), err_msg=str(pt))
Example #40
def test_select_step_scalar():
    assert_almost_equal(select_step_scalar(33122.), 50000.)
    assert_almost_equal(select_step_scalar(433.), 500.)
    assert_almost_equal(select_step_scalar(12.3), 10)
    assert_almost_equal(select_step_scalar(3.3), 5.)
    assert_almost_equal(select_step_scalar(0.66), 0.5)
    assert_almost_equal(select_step_scalar(0.0877), 0.1)
    assert_almost_equal(select_step_scalar(0.00577), 0.005)
    assert_almost_equal(select_step_scalar(0.00022), 0.0002)
    assert_almost_equal(select_step_scalar(0.000012), 0.00001)
    assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
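
All of the expected values follow one pattern: the nearest member of {1, 2, 5} * 10**k, measured in log space. A hypothetical re-implementation (pick_nice_step) that reproduces the expectations above; astropy's actual select_step_scalar may be written differently:

import numpy as np
from numpy.testing import assert_almost_equal

def pick_nice_step(value):
    # nearest of {1, 2, 5} * 10**k in log space (illustrative helper)
    exp0 = np.floor(np.log10(value))
    candidates = np.outer(10.0 ** (exp0 + np.arange(-1, 2)), [1.0, 2.0, 5.0]).ravel()
    return candidates[np.argmin(np.abs(np.log10(candidates) - np.log10(value)))]

for value, expected in [(33122., 50000.), (433., 500.), (12.3, 10.), (3.3, 5.),
                        (0.66, 0.5), (0.0877, 0.1), (0.00577, 0.005),
                        (0.00022, 0.0002), (0.000012, 0.00001),
                        (0.000000443, 0.0000005)]:
    assert_almost_equal(pick_nice_step(value), expected)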
Example #41
def test_ncdf2dcd_coords(ncdf2dcd):
    ncdf, dcd = ncdf2dcd
    for ts_ncdf, ts_dcd in zip(ncdf.trajectory, dcd.trajectory):
        assert_almost_equal(ts_ncdf.positions, ts_dcd.positions, 3)
Example #42
 def test_get_tau_sd(self):
     sd = np.array([2])
     assert_almost_equal(continuous.get_tau_sd(sd=sd), [1. / sd**2, sd])
Example #43
    def test_rmsf_single_frame(self):
        rmsfs = MDAnalysis.analysis.rms.RMSF(self.universe.select_atoms('name CA'))
        rmsfs.run(start=5, stop=6, quiet=True)

        assert_almost_equal(rmsfs.rmsf, 0, 5,
                            err_msg="error: rmsfs should all be zero")
Example #44
 def test_set_time(self):
     u = mda.Universe(PSF, DCD)
     assert_almost_equal(u.trajectory.time, 1.0, decimal=5)
Example #45
def test_hog_basic_orientations_and_data_types():
    # scenario:
    #  1) create image (with float values) where upper half is filled by
    #     zeros, bottom half by 100
    #  2) create unsigned integer version of this image
    #  3) calculate feature.hog() for both images, both with 'transform_sqrt'
    #     option enabled and disabled
    #  4) verify that all results are equal where expected
    #  5) verify that computed feature vector is as expected
    #  6) repeat the scenario for 90, 180 and 270 degrees rotated images

    # size of testing image
    width = height = 35

    image0 = np.zeros((height, width), dtype='float')
    image0[height // 2:] = 100

    for rot in range(4):
        # rotate by 0, 90, 180 and 270 degrees
        image_float = np.rot90(image0, rot)

        # create uint8 image from image_float
        image_uint8 = image_float.astype('uint8')

        (hog_float, hog_img_float) = feature.hog(
            image_float, orientations=4, pixels_per_cell=(8, 8),
            cells_per_block=(1, 1), visualise=True, transform_sqrt=False)
        (hog_uint8, hog_img_uint8) = feature.hog(
            image_uint8, orientations=4, pixels_per_cell=(8, 8),
            cells_per_block=(1, 1), visualise=True, transform_sqrt=False)
        (hog_float_norm, hog_img_float_norm) = feature.hog(
            image_float, orientations=4, pixels_per_cell=(8, 8),
            cells_per_block=(1, 1), visualise=True, transform_sqrt=True)
        (hog_uint8_norm, hog_img_uint8_norm) = feature.hog(
            image_uint8, orientations=4, pixels_per_cell=(8, 8),
            cells_per_block=(1, 1), visualise=True, transform_sqrt=True)

        # set to True to enable manual debugging with graphical output,
        # must be False for automatic testing
        if False:
            import matplotlib.pyplot as plt
            plt.figure()
            plt.subplot(2, 3, 1)
            plt.imshow(image_float)
            plt.colorbar()
            plt.title('image')
            plt.subplot(2, 3, 2)
            plt.imshow(hog_img_float)
            plt.colorbar()
            plt.title('HOG result visualisation (float img)')
            plt.subplot(2, 3, 5)
            plt.imshow(hog_img_uint8)
            plt.colorbar()
            plt.title('HOG result visualisation (uint8 img)')
            plt.subplot(2, 3, 3)
            plt.imshow(hog_img_float_norm)
            plt.colorbar()
            plt.title('HOG result (transform_sqrt) visualisation (float img)')
            plt.subplot(2, 3, 6)
            plt.imshow(hog_img_uint8_norm)
            plt.colorbar()
            plt.title('HOG result (transform_sqrt) visualisation (uint8 img)')
            plt.show()

        # results (features and visualisation) for float and uint8 images must
        # be almost equal
        assert_almost_equal(hog_float, hog_uint8)
        assert_almost_equal(hog_img_float, hog_img_uint8)

        # resulting features should be almost equal when 'transform_sqrt' is enabled
        #  or disabled (for current simple testing image)
        assert_almost_equal(hog_float, hog_float_norm, decimal=4)
        assert_almost_equal(hog_float, hog_uint8_norm, decimal=4)

        # reshape resulting feature vector to matrix with 4 columns (each
        # corresponding to one of 4 directions); only one direction should
        # contain nonzero values (this is manually determined for testing
        # image)
        actual = np.max(hog_float.reshape(-1, 4), axis=0)

        if rot in [0, 2]:
            # image is rotated by 0 and 180 degrees
            desired = [0, 0, 1, 0]
        elif rot in [1, 3]:
            # image is rotated by 90 and 270 degrees
            desired = [1, 0, 0, 0]
        else:
            raise Exception('Result is not determined for this rotation.')

        assert_almost_equal(actual, desired, decimal=2)
Example #46
def test_ncdf2dcd_unitcell(ncdf2dcd):
    ncdf, dcd = ncdf2dcd
    for ts_ncdf, ts_dcd in zip(ncdf.trajectory, dcd.trajectory):
        assert_almost_equal(ts_ncdf.dimensions, ts_dcd.dimensions, 3)
Example #47
 def test_simple(self):
     x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan])
     y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
     y = np.abs(x)
     for i in range(len(x)):
         assert_almost_equal(y[i], y_r[i])
Example #48
def test_optimize_cutoff(universe, lipid_heads):
    cutoff, N = optimize_cutoff(universe, lipid_heads, pbc=True)
    assert N == 2
    assert_almost_equal(cutoff, 10.5, decimal=4)
Example #49
    def test_special_values(self):
        xl = []
        yl = []

        # From C99 std (Sec 6.3.2)
        # XXX: check exceptions raised
        # --- raise for invalid fails.

        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
        # floating-point exception.
        with np.errstate(divide='raise'):
            x = np.array([-0.0], dtype=complex)  # negative zero
            y = complex(-np.inf, np.pi)
            assert_raises(FloatingPointError, np.log, x)
        with np.errstate(divide='ignore'):
            assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
        # floating-point exception.
        with np.errstate(divide='raise'):
            x = np.array([0], dtype=complex)
            y = complex(-np.inf, 0)
            assert_raises(FloatingPointError, np.log, x)
        with np.errstate(divide='ignore'):
            assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        # clog(x + i inf) returns +inf + i pi/2, for finite x.
        x = np.array([complex(1, np.inf)], dtype=complex)
        y = complex(np.inf, 0.5 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-1, np.inf)], dtype=complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
        # 'invalid' floating- point exception, for finite x.
        with np.errstate(invalid='raise'):
            x = np.array([complex(1., np.nan)], dtype=complex)
            y = complex(np.nan, np.nan)
            #assert_raises(FloatingPointError, np.log, x)
        with np.errstate(invalid='ignore'):
            assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        with np.errstate(invalid='raise'):
            x = np.array([np.inf + 1j * np.nan], dtype=complex)
            #assert_raises(FloatingPointError, np.log, x)
        with np.errstate(invalid='ignore'):
            assert_almost_equal(np.log(x), y)

        xl.append(x)
        yl.append(y)

        # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
        x = np.array([-np.inf + 1j], dtype=complex)
        y = complex(np.inf, np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
        x = np.array([np.inf + 1j], dtype=complex)
        y = complex(np.inf, 0)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(- inf + i inf) returns +inf + i3pi /4.
        x = np.array([complex(-np.inf, np.inf)], dtype=complex)
        y = complex(np.inf, 0.75 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + i inf) returns +inf + ipi /4.
        x = np.array([complex(np.inf, np.inf)], dtype=complex)
        y = complex(np.inf, 0.25 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+/- inf + iNaN) returns +inf + iNaN.
        x = np.array([complex(np.inf, np.nan)], dtype=complex)
        y = complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-np.inf, np.nan)], dtype=complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iy) returns NaN + iNaN and optionally raises the
        # 'invalid' floating-point exception, for finite y.
        x = np.array([complex(np.nan, 1)], dtype=complex)
        y = complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + i inf) returns +inf + iNaN.
        x = np.array([complex(np.nan, np.inf)], dtype=complex)
        y = complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iNaN) returns NaN + iNaN.
        x = np.array([complex(np.nan, np.nan)], dtype=complex)
        y = complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(conj(z)) = conj(clog(z)).
        xa = np.array(xl, dtype=complex)
        ya = np.array(yl, dtype=complex)
        with np.errstate(divide='ignore'):
            for i in range(len(xa)):
                assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
Example #50
def check_real_value(f, x1, y1, x, exact=True):
    z1 = np.array([complex(x1, y1)])
    if exact:
        assert_equal(f(z1), x)
    else:
        assert_almost_equal(f(z1), x)
Example #51
    def test_degrade_widemask_and(self):
        """
        Test HealSparse.degrade AND functionality with WIDE_MASK
        """

        nside_coverage = 32
        nside_map = 256
        nside_map2 = 64
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage,
                                                         nside_map,
                                                         WIDE_MASK,
                                                         wide_mask_maxbits=7)
        sparse_map_and = healsparse.HealSparseMap.make_empty(
            nside_coverage, nside_map2, WIDE_MASK, wide_mask_maxbits=7)
        # Fill some pixels in the "high-resolution" map
        pixel = np.arange(0, 1024)
        pixel = np.concatenate([pixel[:512], pixel[512::3]]).ravel()
        sparse_map.set_bits_pix(pixel, [4])

        # Check which pixels will be full in the "low-resolution" map and fill them
        pixel2_all = np.unique(
            np.right_shift(
                pixel,
                healsparse.utils._compute_bitshift(nside_map2, nside_map)))
        sparse_map_and.set_bits_pix(pixel2_all, [4])

        # Get the pixel number of the bad pixels
        pixel2_bad = np.unique(
            np.right_shift(
                pixel[512:],
                healsparse.utils._compute_bitshift(nside_map2, nside_map)))
        sparse_map_and.clear_bits_pix(pixel2_bad, [4])

        # Degrade with and
        sparse_map_test = sparse_map.degrade(nside_map2, reduction='and')

        # Check the results
        testing.assert_almost_equal(sparse_map_and._sparse_map,
                                    sparse_map_test._sparse_map)

        # Repeat for maxbits > 8
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage,
                                                         nside_map,
                                                         WIDE_MASK,
                                                         wide_mask_maxbits=16)
        sparse_map_and = healsparse.HealSparseMap.make_empty(
            nside_coverage, nside_map2, WIDE_MASK, wide_mask_maxbits=16)
        # Fill some pixels in the "high-resolution" map
        pixel = np.arange(0, 1024)
        pixel = np.concatenate([pixel[:512], pixel[512::3]]).ravel()
        sparse_map.set_bits_pix(pixel, [4, 12])
        sparse_map.clear_bits_pix(pixel[:16],
                                  [4])  # set low value in the first pixel

        # Check which pixels will be full in the "low-resolution" map and fill them
        # Note that we are filling more than the ones that are going to be True
        # since we want to preserve the coverage_map
        pixel2_all = np.unique(
            np.right_shift(
                pixel,
                healsparse.utils._compute_bitshift(nside_map2, nside_map)))
        sparse_map_and.set_bits_pix(pixel2_all, [4, 12])

        # Get the pixel number of the bad pixels
        pixel2_bad = np.unique(
            np.right_shift(
                pixel[512:],
                healsparse.utils._compute_bitshift(nside_map2, nside_map)))
        sparse_map_and.clear_bits_pix(pixel2_bad, [4, 12])
        sparse_map_and.clear_bits_pix(pixel2_all[0],
                                      [4])  # set low value in the first pixel

        # Degrade with and
        sparse_map_test = sparse_map.degrade(nside_map2, reduction='and')

        # Check the results
        testing.assert_almost_equal(sparse_map_and._sparse_map,
                                    sparse_map_test._sparse_map)
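
The right_shift bookkeeping works because, in HEALPix NESTED ordering, the (nside_high // nside_low)**2 sub-pixels of each low-resolution pixel occupy consecutive indices, so the parent index is a pure bit shift. A small standalone illustration of that relation, with no healsparse dependency:

import numpy as np

nside_high, nside_low = 256, 64
bitshift = 2 * int(np.log2(nside_high // nside_low))    # 4 bits -> 16 children each
children = np.arange(4000, 4064)                        # some NESTED high-res pixels
parents = np.right_shift(children, bitshift)

first = parents[0]                                      # 4000 >> 4 == 250
np.testing.assert_array_equal(children[parents == first],
                              first * (1 << bitshift) + np.arange(1 << bitshift))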
Example #52
 def test_simple(self):
     x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan])
     y_r = x**2
     y = np.power(x, 2)
     for i in range(len(x)):
         assert_almost_equal(y[i], y_r[i])
Example #53
    def test_degrade_map_recarray(self):
        """
        Test HealSparse.degrade functionality with recarray quantities
        """
        random.seed(seed=12345)

        nside_coverage = 32
        nside_map = 1024
        nside_new = 256

        dtype = [('col1', 'f8'), ('col2', 'f8'), ('col3', 'i4')]
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage,
                                                         nside_map,
                                                         dtype,
                                                         primary='col1')
        pixel = np.arange(20000)
        values = np.zeros_like(pixel, dtype=dtype)
        values['col1'] = random.random(size=pixel.size)
        values['col2'] = random.random(size=pixel.size)
        values['col3'] = random.poisson(size=pixel.size, lam=2)
        sparse_map.update_values_pix(pixel, values)

        ra, dec = hp.pix2ang(nside_map, pixel, nest=True, lonlat=True)

        # Make the test values
        hpmap_col1 = np.zeros(hp.nside2npix(nside_map)) + hp.UNSEEN
        hpmap_col2 = np.zeros(hp.nside2npix(nside_map)) + hp.UNSEEN
        hpmap_col3 = np.zeros(hp.nside2npix(nside_map)) + hp.UNSEEN
        hpmap_col1[pixel] = values['col1']
        hpmap_col2[pixel] = values['col2']
        hpmap_col3[pixel] = values['col3']

        # Degrade healpix maps
        hpmap_col1 = hp.ud_grade(hpmap_col1,
                                 nside_out=nside_new,
                                 order_in='NESTED',
                                 order_out='NESTED')
        hpmap_col2 = hp.ud_grade(hpmap_col2,
                                 nside_out=nside_new,
                                 order_in='NESTED',
                                 order_out='NESTED')
        hpmap_col3 = hp.ud_grade(hpmap_col3,
                                 nside_out=nside_new,
                                 order_in='NESTED',
                                 order_out='NESTED')
        ipnest_test = hp.ang2pix(nside_new, ra, dec, nest=True, lonlat=True)

        # Degrade the old map
        new_map = sparse_map.degrade(nside_out=nside_new)
        testing.assert_almost_equal(
            new_map.get_values_pos(ra, dec, lonlat=True)['col1'],
            hpmap_col1[ipnest_test])
        testing.assert_almost_equal(
            new_map.get_values_pos(ra, dec, lonlat=True)['col2'],
            hpmap_col2[ipnest_test])
        testing.assert_almost_equal(
            new_map.get_values_pos(ra, dec, lonlat=True)['col3'],
            hpmap_col3[ipnest_test])

        # Test degrade-on-read
        self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')

        fname = os.path.join(self.test_dir, 'test_recarray_degrade.hs')
        sparse_map.write(fname)

        new_map2 = healsparse.HealSparseMap.read(fname,
                                                 degrade_nside=nside_new)

        testing.assert_almost_equal(
            new_map2.get_values_pos(ra, dec, lonlat=True)['col1'],
            hpmap_col1[ipnest_test])
        testing.assert_almost_equal(
            new_map2.get_values_pos(ra, dec, lonlat=True)['col2'],
            hpmap_col2[ipnest_test])
        testing.assert_almost_equal(
            new_map2.get_values_pos(ra, dec, lonlat=True)['col3'],
            hpmap_col3[ipnest_test])
Example #54
 def test_simple(self):
     x = np.array([1 + 0j, 1 + 2j])
     y_r = np.log(np.abs(x)) + 1j * np.angle(x)
     y = np.log(x)
     for i in range(len(x)):
         assert_almost_equal(y[i], y_r[i])
Example #55
 def test_add_stochastic_noise_additive(self):
     arr = np.zeros_like(tst_arrL())
     add_stochastic_noise(10, tst_dimL(), arr, 1)
     testing.assert_almost_equal(np.mean(arr), 1, decimal=4)
Example #56
    def test_degrade_widemask_or(self):
        """
        Test HealSparse.degrade OR functionality with WIDE_MASK
        """

        nside_coverage = 32
        nside_map = 256
        nside_map2 = 64
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage,
                                                         nside_map,
                                                         WIDE_MASK,
                                                         wide_mask_maxbits=7)
        sparse_map_or = healsparse.HealSparseMap.make_empty(
            nside_coverage, nside_map2, WIDE_MASK, wide_mask_maxbits=7)
        # Fill some pixels in the "high-resolution" map
        pixel = np.arange(4000, 8000)
        sparse_map.set_bits_pix(pixel, [4])

        # Check which pixels will be full in the "low-resolution" map and fill them
        pixel2 = np.unique(
            np.right_shift(
                pixel,
                healsparse.utils._compute_bitshift(nside_map2, nside_map)))
        sparse_map_or.set_bits_pix(pixel2, [4])

        # Degrade with or
        sparse_map_test = sparse_map.degrade(nside_map2, reduction='or')

        # Check the results
        testing.assert_almost_equal(sparse_map_or._sparse_map,
                                    sparse_map_test._sparse_map)

        # Repeat for maxbits > 8
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage,
                                                         nside_map,
                                                         WIDE_MASK,
                                                         wide_mask_maxbits=16)
        sparse_map_or = healsparse.HealSparseMap.make_empty(
            nside_coverage, nside_map2, WIDE_MASK, wide_mask_maxbits=16)
        # Fill some pixels in the "high-resolution" map
        pixel = np.arange(0, 1024)
        pixel = np.concatenate([pixel[:512], pixel[512::3]]).ravel()
        sparse_map.set_bits_pix(pixel, [4, 12])
        sparse_map.clear_bits_pix(pixel[:16],
                                  [4])  # set low value in the first pixel

        # Check which pixels will be full in the "low-resolution" map and fill them
        # Note that we are filling more than the ones that are going to be True
        # since we want to preserve the coverage_map
        pixel2_all = np.unique(
            np.right_shift(
                pixel,
                healsparse.utils._compute_bitshift(nside_map2, nside_map)))
        sparse_map_or.set_bits_pix(pixel2_all, [4, 12])

        # Get the pixel number of the bad pixels
        pixel2_bad = np.array([0])
        sparse_map_or.clear_bits_pix(pixel2_bad,
                                     [4])  # set low value in the first pixel

        # Degrade with or
        sparse_map_test = sparse_map.degrade(nside_map2, reduction='or')

        # Check the results
        testing.assert_almost_equal(sparse_map_test._sparse_map,
                                    sparse_map_or._sparse_map)

        # Test degrade-on-read
        self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')

        fname = os.path.join(self.test_dir, 'test_wide_degrade.hs')
        sparse_map.write(fname)

        sparse_map_test2 = healsparse.HealSparseMap.read(
            fname, degrade_nside=nside_map2, reduction='or')
        testing.assert_almost_equal(sparse_map_test2._sparse_map,
                                    sparse_map_or._sparse_map)
Example #57
def test_bundle_maps():
    scene = window.Scene()
    bundle = fornix_streamlines()
    bundle, shift = center_streamlines(bundle)

    mat = np.array([[1, 0, 0, 100], [0, 1, 0, 100], [0, 0, 1, 100],
                    [0, 0, 0, 1.]])

    bundle = transform_streamlines(bundle, mat)

    # metric = np.random.rand(*(200, 200, 200))
    metric = 100 * np.ones((200, 200, 200))

    # add lower values
    metric[100, :, :] = 100 * 0.5

    # create a nice orange-red colormap
    lut = actor.colormap_lookup_table(scale_range=(0., 100.),
                                      hue_range=(0., 0.1),
                                      saturation_range=(1, 1),
                                      value_range=(1., 1))

    line = actor.line(bundle, metric, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    scene.add(actor.scalar_bar(lut, ' '))

    report = window.analyze_scene(scene)

    npt.assert_almost_equal(report.actors, 1)
    # window.show(scene)

    scene.clear()

    nb_points = np.sum([len(b) for b in bundle])
    values = 100 * np.random.rand(nb_points)
    # values[:nb_points/2] = 0

    line = actor.streamtube(bundle, values, linewidth=0.1, lookup_colormap=lut)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    scene.clear()

    colors = np.random.rand(nb_points, 3)
    # values[:nb_points/2] = 0

    line = actor.line(bundle, colors, linewidth=2)
    scene.add(line)
    # window.show(scene)

    report = window.analyze_scene(scene)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')
    # window.show(scene)

    arr = window.snapshot(scene)
    report2 = window.analyze_snapshot(arr)
    npt.assert_equal(report2.objects, 1)

    # try other input options for colors
    scene.clear()
    actor.line(bundle, (1., 0.5, 0))
    actor.line(bundle, np.arange(len(bundle)))
    actor.line(bundle)
    colors = [np.random.rand(*b.shape) for b in bundle]
    actor.line(bundle, colors=colors)
Example #58
 def test_add_stochastic_noise_subtractive(self):
     arr = np.ones_like(tst_arrL())
     add_stochastic_noise(10, tst_dimL(), arr, -1)
     testing.assert_almost_equal(np.mean(arr), 0, decimal=4)
Example #59
 def test_coef(self):
     #TODO: check dim of coef
     assert_almost_equal(self.res_ps.coef.ravel(),
                         self.res2.params, decimal=14)
Example #60
"""
This tests the randomly sorted Coulomb matrix and checks that the random repetitions still have the same eigenvalue spectrum.
"""

import CoulombMatrix
import ImportData
import numpy.testing as npt
import numpy.linalg as LA
import numpy as np


X, y, Q = ImportData.loadPd_q("dataSets/pbe_b3lyp_Q_test_abs.csv")
CM = CoulombMatrix.CoulombMatrix(matrixX=X)
# CM.generateES()
# CM.generateSCM()
X, y = CM.generateRSCM(y, numRep=5)

# CM.plot(X, 5)
# CM.plot(X, 4)
# CM.plot(X, 3)
# CM.plot(X, 2)


for i in range(11, 15):
    print(i)
    npt.assert_almost_equal(np.sort(LA.eigvals(np.reshape(X[10], (7, 7)))),
                            np.sort(LA.eigvals(np.reshape(X[i], (7, 7)))),
                            decimal=7)
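
The property this loop relies on is that permuting rows and columns of a symmetric matrix together is an orthogonal similarity transform, so the eigenvalue spectrum is unchanged. A standalone check of that fact, independent of the CoulombMatrix module:

import numpy as np
import numpy.testing as npt

rng = np.random.default_rng(0)
M = rng.normal(size=(7, 7))
M = 0.5 * (M + M.T)                  # symmetric, like a Coulomb matrix
perm = rng.permutation(7)
P = np.eye(7)[perm]                  # permutation matrix
M_perm = P @ M @ P.T                 # shuffle rows and columns together

npt.assert_almost_equal(np.sort(np.linalg.eigvalsh(M)),
                        np.sort(np.linalg.eigvalsh(M_perm)), decimal=10)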