Example #1
    def test_sampling_theta(self):
        nclasses, nannotators, nitems = 3, 5, 5000
        nsamples = 1000

        # create random model (this is our ground truth model)
        true_model = ModelBt.create_initial_state(nclasses, nannotators)
        # create random data
        annotations = true_model.generate_annotations(nitems)

        # create a new model
        model = ModelBt.create_initial_state(nclasses, nannotators)
        # get optimal parameters (to make sure we're at the optimum)
        model.map(annotations)

        # modify parameters, to give false start to sampler
        real_theta = model.theta.copy()
        model.theta = model._random_theta(model.nannotators)
        # save current parameters
        gamma_before, theta_before = model.gamma.copy(), model.theta.copy()
        samples = model.sample_posterior_over_accuracy(
            annotations,
            nsamples,
            burn_in_samples=100,
            thin_samples=2
        )
        # test: the mean of the sampled parameters matches the MAP estimate
        # (up to 3 standard deviations of the sample distribution)
        testing.assert_array_less(np.absolute(samples.mean(0)-real_theta),
                                  3.*samples.std(0))

        # check that original parameters are intact
        testing.assert_equal(model.gamma, gamma_before)
        testing.assert_equal(model.theta, theta_before)
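
The 3-standard-deviation comparison above is a generic sanity check for samplers. A minimal self-contained sketch of the same pattern (numpy only, with a known normal target standing in for the ModelBt posterior):

import numpy as np
from numpy import testing

rng = np.random.default_rng(0)
true_mean = np.array([1.0, -2.0, 0.5])
samples = rng.normal(loc=true_mean, scale=1.0, size=(1000, 3))
# the sample mean should land within 3 sample standard deviations of the target
testing.assert_array_less(np.absolute(samples.mean(0) - true_mean),
                          3. * samples.std(0))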
Example #2
    def test_sample_distribution(self):
        # check sample_distribution's ability to sample from a
        # fixed beta distribution
        nclasses = 8
        nitems = 1000
        # pick random parameters
        a = np.random.uniform(1.0, 5.0, size=(nclasses,))
        b = np.random.uniform(4.0, 6.0, size=(nclasses,))

        # sample values from a beta distribution with fixed parameters
        values = np.empty((nitems, nclasses))
        for k in range(nclasses):
            values[:, k] = scipy.stats.beta.rvs(a[k], b[k], size=nitems)
        arguments = values

        def beta_likelihood(params, values):
            a = params[:nclasses].copy()
            b = params[nclasses:].copy()
            llhood = 0.0
            for k in range(nclasses):
                llhood += scipy.stats.beta._logpdf(values[:, k], a[k], b[k]).sum()
            return llhood

        x_lower = np.zeros((nclasses * 2,)) + 0.5
        x_upper = np.zeros((nclasses * 2,)) + 8.0
        x0 = np.random.uniform(1.0, 7.5, size=(nclasses * 2,))

        dx = optimize_step_size(beta_likelihood, x0.copy(), arguments, x_lower, x_upper, 1000, 100, 0.3, 0.1)

        njumps = 3000
        samples = sample_distribution(beta_likelihood, x0.copy(), arguments, dx, njumps, x_lower, x_upper)
        samples = samples[100:]

        z = np.absolute((samples.mean(0) - np.r_[a, b]) / samples.std(0))
        testing.assert_array_less(z, 3.0)
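
optimize_step_size and sample_distribution above are pyanno internals; the sample-then-check pattern itself can be sketched with a minimal random-walk Metropolis sampler targeting a standard normal (a sketch, not pyanno's implementation):

import numpy as np

def metropolis(logp, x0, step, n_samples, rng):
    # plain random-walk Metropolis: propose, then accept/reject
    x, lp = x0, logp(x0)
    out = np.empty(n_samples)
    for i in range(n_samples):
        prop = x + rng.normal(0.0, step)
        lp_prop = logp(prop)
        if np.log(rng.uniform()) < lp_prop - lp:
            x, lp = prop, lp_prop
        out[i] = x
    return out

rng = np.random.default_rng(0)
samples = metropolis(lambda x: -0.5 * x ** 2, 0.0, 1.0, 5000, rng)
samples = samples[500:]  # discard burn-in
z = abs(samples.mean()) / samples.std()
np.testing.assert_array_less(z, 3.0)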
Example #3
    def test_create_tbg_neural_efficacies(self):
        """ Test the generation of neural efficacies from a truncated
        bi-Gaussian mixture
        """
        m_act = 5.0
        v_act = 0.05
        v_inact = 0.05
        cdef = [Condition(m_act=m_act, v_act=v_act, v_inact=v_inact)]
        npos = 5000
        labels = np.zeros((1, npos), dtype=int)
        labels[0, : npos // 2] = 1
        phy_params = phy.PHY_PARAMS_FRISTON00
        ne = phy.create_tbg_neural_efficacies(phy_params, cdef, labels)

        # check shape consistency:
        self.assertEqual(ne.shape, labels.shape)

        # check that moments are close to theoretical ones
        ne_act = ne[0, np.where(labels[0])]
        ne_inact = ne[0, np.where(labels[0] == 0)]
        m_act_theo = truncnorm.mean(0, phy_params["eps_max"], loc=m_act, scale=v_act ** 0.5)
        v_act_theo = truncnorm.var(0, phy_params["eps_max"], loc=m_act, scale=v_act ** 0.5)
        npt.assert_approx_equal(ne_act.mean(), m_act_theo, significant=2)
        npt.assert_approx_equal(ne_act.var(), v_act_theo, significant=2)

        m_inact_theo = truncnorm.mean(0, phy_params["eps_max"], loc=0.0, scale=v_inact ** 0.5)
        v_inact_theo = truncnorm.var(0, phy_params["eps_max"], loc=0.0, scale=v_inact ** 0.5)
        npt.assert_approx_equal(ne_inact.mean(), m_inact_theo, significant=2)
        npt.assert_approx_equal(ne_inact.var(), v_inact_theo, significant=2)
        npt.assert_array_less(ne, phy_params["eps_max"])
        npt.assert_array_less(0.0, ne)
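
One caveat worth noting: scipy.stats.truncnorm defines its clip points in standardized units ((x - loc) / scale), so if the truncation bounds above are meant as raw values, the theoretical moments would normally be computed as in this sketch (the numbers here are illustrative):

import numpy as np
from scipy.stats import truncnorm

lo, hi = 0.0, 10.0            # raw truncation bounds (hi standing in for eps_max)
loc, scale = 5.0, 0.05 ** 0.5
a, b = (lo - loc) / scale, (hi - loc) / scale  # standardized clip points
m_theo = truncnorm.mean(a, b, loc=loc, scale=scale)
v_theo = truncnorm.var(a, b, loc=loc, scale=scale)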
Example #4
    def test_logistic_regression(self):

        p = lm._RegressionPlotter("x", "c", data=self.df,
                                  logistic=True, n_boot=self.n_boot)
        _, yhat, _ = p.fit_regression(x_range=(-3, 3))
        npt.assert_array_less(yhat, 1)
        npt.assert_array_less(0, yhat)
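
The two bounds follow from the logistic function mapping any real input into the open interval (0, 1); the same check on a plain sigmoid, for illustration:

import numpy as np
import numpy.testing as npt

x = np.linspace(-3, 3, 100)
yhat = 1.0 / (1.0 + np.exp(-x))  # logistic function
npt.assert_array_less(yhat, 1)
npt.assert_array_less(0, yhat)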
Example #5
def test_f_graphlasso():
    # whether or not eigendecomposition is used, result should be the same
    assert_almost_equal(fista_gl.f_graphlasso(Theta, S),
                        fista_gl.f_graphlasso(Theta_, S))
    # minimum at max likelihood solution inv(S)
    assert_array_less(fista_gl.f_graphlasso(scipy.linalg.inv(S), S),
                      fista_gl.f_graphlasso(Theta, S))
Example #6
def test_glm_any_model():
    nbin = 10
    align = 'hold'
    lag = 0.1 # s

    ds = datasets['small']
    dc = DataCollection(ds.get_files())
    dc.add_unit(ds.get_units()[0], lag)
    bnd = dc.make_binned(nbin=nbin, align=align)
    ntask, nrep, nunit, nbin = bnd.shape

    # make perfect test counts
    # direction-only model
    tp = radians([20, 10]) # degrees
    b0 = log(10) # log(Hz)
    pd = pol2cart(tp)

    drn = kinematics.get_idir(bnd.pos, axis=2)
    rate = exp(dot(drn, pd) + b0)
    window_size = diff(bnd.bin_edges, axis=2)
    count_mean = rate * window_size
    count = poisson(count_mean)
    count = count[:,:,None]
    bnd.set_PSTHs(count)

    # now fit data for pd
    count, pos, time = bnd.get_for_regress()
    bnom, bse_nom = glm_any_model(count, pos, time, model='kd')
    pd_exp = unitvec(bnom['d'])
    tp_exp = cart2pol(pd_exp)
    
    acceptable_err = 0.05 # about 3 degrees absolute error
    err = abs(tp - tp_exp)
    assert_array_less(err, acceptable_err)
Example #7
    def test_geweke_positive(self):
        """Confirm Geweke diagnostic is smaller than 1 for a reasonable number of samples."""
        n_samples = 2000
        n_intervals = 20
        switchpoint = self.get_switchpoint(n_samples)

        with pytest.raises(ValueError):
            # first and last must be between 0 and 1
            geweke(switchpoint, first=-0.3, last=1.1, intervals=n_intervals)

        with pytest.raises(ValueError):
            # first and last must add to < 1
            geweke(switchpoint, first=0.3, last=0.7, intervals=n_intervals)

        first = 0.1
        last = 0.7
        # returns an (intervals x 2) matrix: first column start indexes, second
        # column z-scores
        z_switch = geweke(switchpoint, first=first,
                          last=last, intervals=n_intervals)
        start = z_switch[:, 0]
        z_scores = z_switch[:, 1]

        # Ensure `intervals` argument is honored
        assert z_switch.shape[0] == n_intervals

        # Start indices should all lie before the final `last` fraction of samples
        assert_array_less(start, (1 - last) * n_samples)

        # These z-scores should be small, since there are more samples.
        assert max(abs(z_scores)) < 1
Example #8
def test_ellipse_model_estimate_from_data():
    data = np.array([
        [264, 854], [265, 875], [268, 863], [270, 857], [275, 905], [285, 915],
        [305, 925], [324, 934], [335, 764], [336, 915], [345, 925], [345, 945],
        [354, 933], [355, 745], [364, 936], [365, 754], [375, 745], [375, 735],
        [385, 736], [395, 735], [394, 935], [405, 727], [415, 736], [415, 727],
        [425, 727], [426, 929], [435, 735], [444, 933], [445, 735], [455, 724],
        [465, 934], [465, 735], [475, 908], [475, 726], [485, 753], [485, 728],
        [492, 762], [495, 745], [491, 910], [493, 909], [499, 904], [505, 905],
        [504, 747], [515, 743], [516, 752], [524, 855], [525, 844], [525, 885],
        [533, 845], [533, 873], [535, 883], [545, 874], [543, 864], [553, 865],
        [553, 845], [554, 825], [554, 835], [563, 845], [565, 826], [563, 855],
        [563, 795], [565, 735], [573, 778], [572, 815], [574, 804], [575, 665],
        [575, 685], [574, 705], [574, 745], [575, 875], [572, 732], [582, 795],
        [579, 709], [583, 805], [583, 854], [586, 755], [584, 824], [585, 655],
        [581, 718], [586, 844], [585, 915], [587, 905], [594, 824], [593, 855],
        [590, 891], [594, 776], [596, 767], [593, 763], [603, 785], [604, 775],
        [603, 885], [605, 753], [605, 655], [606, 935], [603, 761], [613, 802],
        [613, 945], [613, 965], [615, 693], [617, 665], [623, 962], [624, 972],
        [625, 995], [633, 673], [633, 965], [633, 683], [633, 692], [633, 954],
        [634, 1016], [635, 664], [641, 804], [637, 999], [641, 956], [643, 946],
        [643, 926], [644, 975], [643, 655], [646, 705], [651, 664], [651, 984],
        [647, 665], [651, 715], [651, 725], [651, 734], [647, 809], [651, 825],
        [651, 873], [647, 900], [652, 917], [651, 944], [652, 742], [648, 811],
        [651, 994], [652, 783], [650, 911], [654, 879]])

    # estimate parameters of real data
    model = EllipseModel()
    model.estimate(data)

    # test whether estimated parameters are smaller than 2000, i.e. the fit is stable
    assert_array_less(np.abs(model.params[:4]), np.array([2e3] * 4))
Example #9
    def test_iris_data_set(self):
        def create_data_entry(line):
            split = line.strip().split(",")
            data_input = np.array([float(s) / 7 for s in split[:-1]]).astype(TYPE)

            classes = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
            data_target = np.array([float(split[-1] == class_) for class_ in classes]).astype(TYPE)

            return data_input, data_target

        with open("iris.data") as iris_data_file:
            data_set = [create_data_entry(line)
                        for line in iris_data_file if line.strip()]
        random.shuffle(data_set)

        training_set = data_set[:-30]
        test_set = data_set[-30:]

        n = FeedForwardNetwork([4, 50, 3])
        learning_rate = 0.5

        for _ in range(10000):
            training_input, training_target = training_set[random.randrange(0, len(training_set))]
            intermediate_results = {}
            y = n.forward_prop(training_input, intermediate_results)
            dy = mathutils.mse_prime(y, training_target)
            n.back_prop(dy, intermediate_results)
            n.train(learning_rate, intermediate_results)

        errors = [mathutils.mean_squared_error(n.forward_prop(test_input, {}), test_target) for test_input, test_target in test_set]
        mean_squared_error = np.mean(np.square(errors))
        npt.assert_array_less(mean_squared_error, 0.05)
Example #10
def test_randomize_onesample_correction():
    """Test that maximum based correction (seems to) work."""
    a = rs.normal(0, 1, (100, 10))
    t_un, p_un = stat.randomize_onesample(a, 1000, corrected=False)
    t_corr, p_corr = stat.randomize_onesample(a, 1000, corrected=True)
    assert_array_equal(t_un, t_corr)
    npt.assert_array_less(p_un, p_corr)
Example #11
	def test_random_positions_lim_fraction(self):
		R = uniform(0,1)
		N = uniform(0,1000)

		x = random_positions(R,N)
		r = dot(x,x)
		assert_array_less(r,R**2)
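
random_positions is project-specific; note also that dot(x, x) only yields a scalar squared radius for a single position vector, so for an (N, 3) array a row-wise sum is needed. A self-contained sketch of the same invariant using rejection sampling:

import numpy as np
from numpy.testing import assert_array_less

rng = np.random.default_rng(0)
R, N = 0.7, 500
pts = []
while len(pts) < N:  # rejection-sample uniform points in a ball of radius R
    p = rng.uniform(-R, R, size=3)
    if p @ p < R ** 2:
        pts.append(p)
x = np.array(pts)
r2 = (x ** 2).sum(axis=1)  # squared radius of each point
assert_array_less(r2, R ** 2)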
Example #12
    def test_minimize_l_bfgs_b_maxfun_interruption(self):
        # gh-6162
        f = optimize.rosen
        g = optimize.rosen_der
        values = []
        x0 = np.ones(7) * 1000

        def objfun(x):
            value = f(x)
            values.append(value)
            return value

        # Look for an interesting test case.
        # Request a maxfun that stops at a particularly bad function
        # evaluation somewhere between 100 and 300 evaluations.
        low, medium, high = 30, 100, 300
        optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
        v, k = max((y, i) for i, y in enumerate(values[medium:]))
        maxfun = medium + k
        # If the minimization strategy is reasonable,
        # the minimize() result should not be worse than the best
        # of the first 30 function evaluations.
        target = min(values[:low])
        xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
        assert_array_less(fmin, target)
Example #13
    def test_sample_linf_ball(self):
        """
        Test :meth:`bet.sensitivity.gradients.sample_linf_ball`.
        """
        self.cluster_set = grad.sample_linf_ball(self.input_set_centers,
                self.num_close, self.rvec)

        # Test the method returns the correct dimensions
        self.assertEqual(self.cluster_set._values.shape, ((self.num_close+1) *\
            self.num_centers, self.input_dim))

        # Check the method returns centers followed by the clusters around the
        # first center.
        self.repeat = np.repeat(self.centers, self.num_close, axis=0)
        
        nptest.assert_array_less(np.linalg.norm(self.cluster_set._values[\
            self.num_centers:] - self.repeat, np.inf, axis=1),
            np.max(self.rvec))

        # Check that the samples are in lam_domain
        self.cluster_set.update_bounds()
        left = np.all(np.greater_equal(self.cluster_set._values,
            self.cluster_set._left))
        right = np.all(np.less_equal(self.cluster_set._values,
            self.cluster_set._right))
        assert np.all(np.logical_and(left, right))
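
The first assertion above reduces to a simple property: points sampled in an L-infinity ball of radius r around a center stay within max-norm distance r of it. A numpy-only sketch (the names here are illustrative, not bet's API):

import numpy as np
import numpy.testing as nptest

rng = np.random.default_rng(0)
center = np.array([0.5, -1.0, 2.0])
r = 0.25
cluster = center + rng.uniform(-r, r, size=(100, 3))  # samples in the L-inf ball
nptest.assert_array_less(
    np.linalg.norm(cluster - center, np.inf, axis=1), r)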
Example #14
    def test_regularized_nonlinear(self):
        """
        Test gradient descent solver with regularized non-linear acceleration,
        solving problems with L2-norm functions.

        """
        dim = 25
        np.random.seed(0)
        x0 = np.random.rand(dim)
        xstar = np.random.rand(dim)
        x0 = xstar + 5. * (x0 - xstar) / np.linalg.norm(x0 - xstar)

        A = np.random.rand(dim, dim)
        step = 1 / np.linalg.norm(np.dot(A.T, A))

        accel = acceleration.regularized_nonlinear(k=5)
        solver = solvers.gradient_descent(step=step, accel=accel)
        param = {'solver': solver, 'rtol': 0,
                 'maxit': 200, 'verbosity': 'NONE'}

        # L2-norm prox and dummy gradient.
        f1 = functions.norm_l2(lambda_=0.5, A=A, y=np.dot(A, xstar))
        f2 = functions.dummy()
        ret = solvers.solve([f1, f2], x0, **param)
        pctdiff = 100 * np.sum((xstar - ret['sol'])**2) / np.sum(xstar**2)
        nptest.assert_array_less(pctdiff, 1.91)

        # Sanity checks
        accel = acceleration.regularized_nonlinear()
        self.assertRaises(ValueError, accel.__init__, 10, ['not', 'good'])
        self.assertRaises(ValueError, accel.__init__, 10, 'nope')
Example #15
def test_permuted_ols_check_h0_noeffect_signswap(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 100
    # create dummy design with no effect
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    # permuted OLS
    # We check that h0 is close to the theoretical distribution, which is
    # known for this simple design (= t(n_samples - dof)).
    perm_ranges = [10, 100, 1000]  # test various number of permutations
    all_kstest_pvals = []
    # we compute the Mean Squared Error between cumulative density functions
    # as a check of the consistency of the permutation algorithm
    all_mse = []
    for i, n_perm in enumerate(np.repeat(perm_ranges, 10)):
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=False,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_equal(h0.size, n_perm)
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples).cdf)[1]
        all_kstest_pvals.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse.append(mse)
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(perm_ranges), -1))
    all_mse = np.array(all_mse).reshape((len(perm_ranges), -1))
    # check that the KS test does not reject equality of the distributions
    assert_array_less(0.01 / (len(perm_ranges) * 10.), all_kstest_pvals)
    # consistency of the algorithm: the more permutations, the less the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)
Example #16
    def test_minsupport(self):
        # rough sanity checks for convergence
        params = self.res1.params
        x_min = self.res1.endog.min()
        p_min = params[1] + params[2]
        assert_array_less(p_min, x_min)
        assert_almost_equal(p_min, x_min, decimal=2)
Example #17
def test_lomb_scargle_irregular_multi_freq():
    """Test Lomb-Scargle model features on irregularly-sampled periodic data
    with multiple frequencies, each with a single harmonic. More difficult than
    regularly-sampled case, so we allow parameter estimates to be slightly
    noisy.
    """
    frequencies = WAVE_FREQS
    amplitudes = np.zeros((len(frequencies),4))
    amplitudes[:,0] = [4,2,1]
    phase = 0.1
    times, values, errors = irregular_periodic(frequencies, amplitudes, phase)
    all_lomb = generate_features(times, values, errors, LOMB_SCARGLE_FEATS)

    for i, frequency in enumerate(frequencies):
        npt.assert_allclose(frequency,
                all_lomb['freq{}_freq'.format(i+1)], rtol=1e-2)

    for (i,j), amplitude in np.ndenumerate(amplitudes):
        npt.assert_allclose(amplitude,
                all_lomb['freq{}_amplitude{}'.format(i+1,j+1)],
                rtol=1e-1, atol=1e-1)

    for i in [2,3]:
        npt.assert_allclose(amplitudes[i-1,0] / amplitudes[0,0],
                all_lomb['freq_amplitude_ratio_{}1'.format(i)], atol=2e-2)
        npt.assert_allclose(frequencies[i-1] / frequencies[0],
                all_lomb['freq_frequency_ratio_{}1'.format(i)], atol=5e-2)

    npt.assert_array_less(10., all_lomb['freq1_signif'])
Example #18
def test_lomb_scargle_irregular_single_freq():
    """Test Lomb-Scargle model features on irregularly-sampled periodic data
    with one frequency/multiple harmonics. More difficult than
    regularly-sampled case, so we allow parameter estimates to be slightly
    noisy.
    """
    frequencies = np.hstack((WAVE_FREQS[0], np.zeros(len(WAVE_FREQS)-1)))
    amplitudes = np.zeros((len(WAVE_FREQS),4))
    amplitudes[0,:] = [8,4,2,1]
    phase = 0.1
    times, values, errors = irregular_periodic(frequencies, amplitudes, phase)
    all_lomb = generate_features(times, values, errors, LOMB_SCARGLE_FEATS)

    # Only test the first (true) frequency; the rest correspond to noise
    npt.assert_allclose(all_lomb['freq1_freq'], frequencies[0], rtol=1e-2)

    # Only test first frequency here; noise gives non-zero amplitudes for residuals
    for j in range(amplitudes.shape[1]):
        npt.assert_allclose(amplitudes[0,j],
                all_lomb['freq1_amplitude{}'.format(j+1)], rtol=5e-2, atol=5e-2)
        if j >= 1:
            npt.assert_allclose(phase*j*(-1**j),
                all_lomb['freq1_rel_phase{}'.format(j+1)], rtol=1e-1, atol=1e-1)

    npt.assert_array_less(10., all_lomb['freq1_signif'])

    # Only one frequency, so this should explain basically all the variance
    npt.assert_allclose(0., all_lomb['freq_varrat'], atol=5e-3)

    npt.assert_allclose(-np.mean(values), all_lomb['freq_y_offset'], rtol=5e-2)
Example #19
def test_iris():
    """Check consistency on dataset iris."""
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None

    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(iris.data, iris.target)

        assert_array_equal(classes, clf.classes_)
        proba = clf.predict_proba(iris.data)
        if alg == "SAMME":
            clf_samme = clf
            prob_samme = proba
        assert_equal(proba.shape[1], len(classes))
        assert_equal(clf.decision_function(iris.data).shape[1], len(classes))

        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with algorithm %s and score = %f" % \
            (alg, score)

    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
Example #20
    def testFrequencyResponse(self):
        testFilter = SignalFilter(400.0, 100.0, btype="lowpass").iir(1, ftype="butter")
        fx0, fy0 = testFilter.frequency_response()
        fx1, fy1 = ChainedFilter(testFilter, repeat=1).frequency_response()
        fx2, fy2 = ChainedFilter(testFilter, repeat=2).frequency_response()
        # Shapes should all match
        assert_equal(fx0.shape, fy0.shape)
        assert_equal(fx1.shape, fy1.shape)
        assert_equal(fx2.shape, fy2.shape)
        assert_equal(fx0.shape, fx1.shape)
        assert_equal(fx1.shape, fx2.shape)
        assert_equal(fy0.shape, fy1.shape)
        assert_equal(fy1.shape, fy2.shape)
        # X should be all equal
        assert_allclose(fx0, fx1)
        assert_allclose(fx1, fx2)
        # Y with repeat=1 should be equal
        assert_allclose(fy0, fy1)
        # fy2 filters more -> should be less
        assert_array_less(fy2, fy1)
        # Check samplerate
        assert_equal(ChainedFilter(testFilter, repeat=1).samplerate, 400.)
        assert_equal(ChainedFilter(testFilter, repeat=2).samplerate, 400.)
        assert_equal(ChainedFilter(testFilter, repeat=3).samplerate, 400.)
        # Check as_samplerate (basic check)
        cf = ChainedFilter(testFilter, repeat=1)
        cf400 = cf.as_samplerate(400.)  # Same samplerate => shortcut
        cf500 = cf.as_samplerate(500.)
        assert_true(cf400 == cf)
        assert_true(cf500 != cf)
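
SignalFilter and ChainedFilter are project wrappers; the "repeat=2 attenuates at least as much" assertion can be sketched directly with scipy.signal, since cascading a filter squares its magnitude response:

import numpy as np
from numpy.testing import assert_array_less
from scipy import signal

b, a = signal.butter(1, 100.0, btype='lowpass', fs=400.0)
w, h = signal.freqz(b, a)
mag1 = np.abs(h)       # single pass
mag2 = np.abs(h) ** 2  # two cascaded passes
# a Butterworth lowpass has |H| <= 1, so the cascade never exceeds one pass
assert_array_less(mag2, mag1 + 1e-12)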
Example #21
    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation

        # Generate samples
        dim = 5
        samples = 1000  # Not too many, or the test takes too long
        ks_prob = 0.39  # ...so don't expect much precision
        np.random.seed(518)  # Note that the test is sensitive to seed too
        xs = ortho_group.rvs(dim, size=samples)

        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        #   effectively picking off entries in the matrices of xs.
        #   These projections should all have the same distribution,
        #     establishing rotational invariance. We use the two-sided
        #     KS test to confirm this.
        #   We could instead test that angles between random vectors
        #     are uniformly distributed, but the below is sufficient.
        #   It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)
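
The underlying pattern: samples drawn from the same distribution should yield large two-sample KS p-values. A minimal sketch, with a deliberately loose threshold so the check is robust to the particular seed:

import numpy as np
from numpy.testing import assert_array_less
from scipy.stats import ks_2samp

rng = np.random.default_rng(518)
s1 = rng.normal(size=1000)
s2 = rng.normal(size=1000)  # drawn from the same distribution as s1
pval = ks_2samp(s1, s2)[1]
assert_array_less(0.001, pval)  # equality of distributions not rejected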
Example #22
def test_mutual_information():
    """ Test the function which returns the mutual information in two
    partitions

    XXX - This test is currently incomplete - it only checks the most basic
    case of MI(x, x)==1, but doesn't do any non-trivial checks.
    """

    # nnod_mod, av_degrees, nmods
    networks = [ [4, [2, 3], [2, 4, 6]],
                 [8, [4, 6], [4, 6, 8]],
                 [40, [20], [2]] ]

    for nnod_mod, av_degrees, nmods in networks:
        for nmod in nmods:
            nnod = nnod_mod*nmod
            for av_degree in av_degrees:
                #make a graph object
                g = mod.random_modular_graph(nnod, nmod, av_degree)

                #Compute the number of nodes per module
                nnod_mod = nnod // nmod
                #Make a "correct" partition for the graph
                ppart = mod.perfect_partition(nmod,nnod_mod)

                #graph_out, mod_array =mod.simulated_annealing(g, temperature =
                #temperature,temp_scaling = temp_scaling, tmin=tmin)

                #test the perfect case for now: two of the same partition
                #returns 1
                mi_orig  = mod.mutual_information(ppart,ppart)
                yield npt.assert_equal(mi_orig,1)

                #move one node and test that mutual_information comes out
                #correctly
                graph_partition = mod.GraphPartition(g,ppart)
                graph_partition.node_update(0,0,1)

                mi = mod.mutual_information(ppart,graph_partition.index)
                yield npt.assert_array_less(mi, mi_orig)
                ## NOTE: CORRECTNESS NOT TESTED YET

                #merge modules and check that mutual information comes out
                #correctly/lower
                graph_partition2 = mod.GraphPartition(g,ppart)
                merged_module, e_new, a_new, d,t,m1,m2,x = graph_partition2.compute_module_merge(0,1)
                graph_partition2.apply_module_merge(m1,m2,merged_module,e_new,a_new)
                mi2 = mod.mutual_information(ppart,graph_partition2.index)
                yield npt.assert_array_less(mi2,mi_orig)
                ## NOTE: CORRECTNESS NOT TESTED YET

                #split modules and check that mutual information comes out
                #correctly/lower
                graph_partition3 = mod.GraphPartition(g,ppart)
                n1 = list(graph_partition3.index[0])[::2]
                n2 = list(graph_partition3.index[0])[1::2]
                split_modules,e_new,a_new,d,t,m,n1,n2 = graph_partition3.compute_module_split(0,n1,n2)
                graph_partition3.apply_module_split(m,n1,n2,split_modules,e_new,a_new)
                mi3 = mod.mutual_information(ppart,graph_partition3.index)
                yield npt.assert_array_less(mi3,mi_orig)
Example #23
def check_error_convergence(L2_errors):
    # Testing convergence
    for k in arange(1, len(L2_errors)):
        assert_array_less(L2_errors[k], L2_errors[k - 1])

    # Testing that the final error is close to zero (to 1 decimal place)
    assert_almost_equal(L2_errors[-1], 0.0, decimal=1)
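
The loop asserts element-wise strictly decreasing errors; an equivalent vectorized form of the same convergence check (the error sequence here is hypothetical):

import numpy as np
from numpy.testing import assert_array_less, assert_almost_equal

L2_errors = np.array([0.8, 0.4, 0.1, 0.02])  # hypothetical error sequence
assert_array_less(np.diff(L2_errors), 0.0)   # strictly decreasing
assert_almost_equal(L2_errors[-1], 0.0, decimal=1)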
Example #24
def test_initialize_defaults():
    model = BmiHeat()
    model.initialize()

    assert_almost_equal(model.get_current_time(), 0.)
    assert_array_less(model.get_value('plate_surface__temperature'), 1.)
    assert_array_less(0., model.get_value('plate_surface__temperature'))
Example #25
def test_lomb_scargle_regular_single_freq():
    """
    Test Lomb-Scargle model features on regularly-sampled periodic data with one
    frequency/multiple harmonics. Estimated parameters should be very accurate in
    this case.
    """
    frequencies = np.hstack((WAVE_FREQS[0], np.zeros(len(WAVE_FREQS)-1)))
    amplitudes = np.zeros((len(frequencies),4))
    amplitudes[0,:] = [8,4,2,1]
    phase = 0.1
    times, values, errors = regular_periodic(frequencies, amplitudes, phase)
    all_lomb = sft.generate_science_features(times, values, errors,
            lomb_features)

    # Only test the first (true) frequency; the rest correspond to noise
    npt.assert_allclose(all_lomb['freq1_freq'], frequencies[0])

    # Hard-coded value from previous solution
    npt.assert_allclose(0.001996007984, all_lomb['freq1_lambda'], rtol=1e-7)

    for (i,j), amplitude in np.ndenumerate(amplitudes):
        npt.assert_allclose(amplitude,
                all_lomb['freq{}_amplitude{}'.format(i+1,j+1)], rtol=1e-2,
                    atol=1e-2)

    # Only test the first (true) frequency; the rest correspond to noise
    for j in range(1, amplitudes.shape[1]):
        npt.assert_allclose(phase*j*(-1**j),
            all_lomb['freq1_rel_phase{}'.format(j+1)], rtol=1e-2, atol=1e-2)

    # Frequency ratio not relevant since there is only one frequency; only test amplitude/signif
    for i in [2,3]:
        npt.assert_allclose(0., all_lomb['freq_amplitude_ratio_{}1'.format(i)], atol=1e-3)

    npt.assert_array_less(10., all_lomb['freq1_signif'])

    # Only one frequency, so this should explain basically all the variance
    npt.assert_allclose(0., all_lomb['freq_varrat'], atol=5e-3)

    # Exactly periodic, so the same minima/maxima should reoccur
    npt.assert_allclose(0., all_lomb['freq_model_max_delta_mags'], atol=1e-6)
    npt.assert_allclose(0., all_lomb['freq_model_min_delta_mags'], atol=1e-6)

    # Linear trend should be zero since the signal is exactly sinusoidal
    npt.assert_allclose(0., all_lomb['linear_trend'], atol=1e-4)

    folded_times = times % (1. / (frequencies[0] / 2.))
    sort_indices = np.argsort(folded_times)
    folded_times = folded_times[sort_indices]
    folded_values = values[sort_indices]

    # Residuals from doubling period should be much higher
    npt.assert_array_less(10., all_lomb['medperc90_2p_p'])

    # Slopes should be the same for {un,}folded data; use unfolded for stability
    slopes = np.diff(values) / np.diff(times)
    npt.assert_allclose(np.percentile(slopes,10),
        all_lomb['fold2P_slope_10percentile'], rtol=1e-2)
    npt.assert_allclose(np.percentile(slopes,90),
        all_lomb['fold2P_slope_90percentile'], rtol=1e-2)
Example #26
def test_randomize_corrmat_correction():
    """Test that FWE correction works."""
    a = rs.randn(3, 20)
    p_mat = algo.randomize_corrmat(a, "upper", False)
    p_mat_corr = algo.randomize_corrmat(a, "upper", True)
    triu = np.triu_indices(3, 1)
    npt.assert_array_less(p_mat[triu], p_mat_corr[triu])
Example #27
def test_orthogonal_procrustes():
    np.random.seed(1234)
    for m, n in ((6, 4), (4, 4), (4, 6)):
        # Sample a random target matrix.
        B = np.random.randn(m, n)
        # Sample a random orthogonal matrix
        # by computing eigh of a sampled symmetric matrix.
        X = np.random.randn(n, n)
        w, V = eigh(X.T + X)
        assert_allclose(inv(V), V.T)
        # Compute a matrix with a known orthogonal transformation that gives B.
        A = np.dot(B, V.T)
        # Check that an orthogonal transformation from A to B can be recovered.
        R, s = orthogonal_procrustes(A, B)
        assert_allclose(inv(R), R.T)
        assert_allclose(A.dot(R), B)
        # Create a perturbed input matrix.
        A_perturbed = A + 1e-2 * np.random.randn(m, n)
        # Check that the orthogonal procrustes function can find an orthogonal
        # transformation that is better than the orthogonal transformation
        # computed from the original input matrix.
        R_prime, s = orthogonal_procrustes(A_perturbed, B)
        assert_allclose(inv(R_prime), R_prime.T)
        # Compute the naive and optimal transformations of the perturbed input.
        naive_approx = A_perturbed.dot(R)
        optim_approx = A_perturbed.dot(R_prime)
        # Compute the Frobenius norm errors of the matrix approximations.
        naive_approx_error = norm(naive_approx - B, ord='fro')
        optim_approx_error = norm(optim_approx - B, ord='fro')
        # Check that the orthogonal Procrustes approximation is better.
        assert_array_less(optim_approx_error, naive_approx_error)
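
A compact version of the recovery check above, using a known 2-D rotation as the target transformation:

import numpy as np
from numpy.testing import assert_allclose
from scipy.linalg import orthogonal_procrustes

rng = np.random.default_rng(1234)
theta = 0.3
V = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])  # known rotation
B = rng.normal(size=(6, 2))
A = B @ V.T  # constructed so that A @ V == B exactly
R, s = orthogonal_procrustes(A, B)
assert_allclose(R, V, atol=1e-10)     # the rotation is recovered
assert_allclose(A @ R, B, atol=1e-10)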
Example #28
def test_local_standard_deviation():

    # SNR = 20
    mean = 100
    std = 5
    shape = (30, 30, 30, 3)

    for N in [1, 4, 8, 12]:
        noise = 0
        for _ in range(N):
            noise += np.random.normal(mean, std, shape)**2 + np.random.normal(mean, std, shape)**2

        noise = np.sqrt(noise)
        corrected_std = local_standard_deviation(noise) / np.sqrt(xi(mean, std, N))

        # everything less than 10% error of real value?
        assert_array_less(np.abs(std - corrected_std.mean()) / std, 0.1)

    # This estimation has a harder time at low SNR and high coil counts,
    # probably due to how the synthetic noise field is computed
    # SNR = 5
    mean = 250
    std = 50
    shape = (30, 30, 30, 3)

    for N in [1, 4, 8, 12]:
        noise = 0
        for _ in range(N):
            noise += np.random.normal(mean, std, shape)**2 + np.random.normal(mean, std, shape)**2

        noise = np.sqrt(noise)
        corrected_std = local_standard_deviation(noise) / np.sqrt(xi(mean, std, N))

        # everything less than 10% error of real value?
        assert_array_less(np.abs(std - corrected_std.mean()) / std, 0.1)
Example #29
    def test_dare(self):
        A = matrix([[-0.6, 0],[-0.1, -0.4]])
        Q = matrix([[2, 1],[1, 0]])
        B = matrix([[2, 1],[0, 1]])
        R = matrix([[1, 0],[0, 1]])

        X,L,G = dare(A,B,Q,R)
        # print("The solution obtained is", X)
        assert_array_almost_equal(
            A.T * X * A - X -
            A.T * X * B * solve(B.T * X * B + R, B.T * X * A) + Q, zeros((2,2)))
        assert_array_almost_equal(solve(B.T * X * B + R, B.T * X * A), G)
        # check for stable closed loop
        lam = eigvals(A - B * G)
        assert_array_less(abs(lam), 1.0)

        A = matrix([[1, 0],[-1, 1]])
        Q = matrix([[0, 1],[1, 1]])
        B = matrix([[1],[0]])
        R = 2

        X,L,G = dare(A,B,Q,R)
        # print("The solution obtained is", X)
        assert_array_almost_equal(
            A.T * X * A - X -
            A.T * X * B * solve(B.T *  X * B + R, B.T * X * A) + Q, zeros((2,2)))
        assert_array_almost_equal(B.T * X * A / (B.T * X * B + R), G)
        # check for stable closed loop
        lam = eigvals(A - B * G)
        assert_array_less(abs(lam), 1.0)
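
dare above comes from python-control; the same closed-loop stability assertion can be sketched with scipy.linalg.solve_discrete_are (identity weights are swapped in here for simplicity, so this is an illustration rather than the test's exact problem):

import numpy as np
from numpy.linalg import eigvals, solve
from numpy.testing import assert_array_less
from scipy.linalg import solve_discrete_are

A = np.array([[-0.6, 0.0], [-0.1, -0.4]])
B = np.array([[2.0, 1.0], [0.0, 1.0]])
Q = np.eye(2)  # simplified (PSD) state weight
R = np.eye(2)

X = solve_discrete_are(A, B, Q, R)
G = solve(B.T @ X @ B + R, B.T @ X @ A)  # optimal feedback gain
lam = eigvals(A - B @ G)
assert_array_less(np.abs(lam), 1.0)  # closed loop is stable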
Example #30
def test_lomb_scargle_regular_multi_freq():
    """
    Test Lomb-Scargle model features on regularly-sampled periodic data with
    multiple frequencies, each with a single harmonic. Estimated parameters should
    be very accurate in this case.
    """
    frequencies = WAVE_FREQS
    amplitudes = np.zeros((len(frequencies),4))
    amplitudes[:,0] = [4,2,1]
    phase = 0.1
    times, values, errors = regular_periodic(frequencies, amplitudes, phase)
    all_lomb = sft.generate_science_features(times, values, errors,
            lomb_features)

    for i, frequency in enumerate(frequencies):
        npt.assert_allclose(frequency,
                all_lomb['freq{}_freq'.format(i+1)])

    for (i,j), amplitude in np.ndenumerate(amplitudes):
        npt.assert_allclose(amplitude,
                all_lomb['freq{}_amplitude{}'.format(i+1,j+1)],
                rtol=5e-2, atol=5e-2)

    for i in [2,3]:
        npt.assert_allclose(amplitudes[i-1,0] / amplitudes[0,0],
                all_lomb['freq_amplitude_ratio_{}1'.format(i)], atol=2e-2)

    npt.assert_array_less(10., all_lomb['freq1_signif'])
Example #31
def _test_raw_reader(reader,
                     test_preloading=True,
                     test_kwargs=True,
                     boundary_decimal=2,
                     test_scaling=True,
                     test_rank=True,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.

    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether the reader supports preload=False. If True, both preloading
        cases and memory mapping to file are tested.
    test_kwargs : bool
        Test _init_kwargs support.
    boundary_decimal : int
        Number of decimals up to which the boundary should match.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.

    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        rep = repr(raw)
        assert rep.count('<') == 1
        assert rep.count('>') == 1
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        bnd = min(int(round(raw.buffer_size_sec * raw.info['sfreq'])),
                  raw.n_times)
        slices = [
            slice(0, bnd),
            slice(bnd - 1, bnd),
            slice(3, bnd),
            slice(3, 300),
            slice(None),
            slice(1, bnd)
        ]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [
                slice(bnd, 2 * bnd),
                slice(bnd, bnd + 1),
                slice(0, bnd + 100)
            ]
        other_raws = [
            reader(preload=buffer_fname, **kwargs),
            reader(preload=False, **kwargs)
        ]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(data1, data2)
                assert_allclose(times1, times2)

        # test projection vs cals and data units
        other_raw = reader(preload=False, **kwargs)
        other_raw.del_proj()
        eeg = meg = fnirs = False
        if 'eeg' in raw:
            eeg, atol = True, 1e-18
        elif 'grad' in raw:
            meg, atol = 'grad', 1e-24
        elif 'mag' in raw:
            meg, atol = 'mag', 1e-24
        elif 'hbo' in raw:
            fnirs, atol = 'hbo', 1e-10
        elif 'hbr' in raw:
            fnirs, atol = 'hbr', 1e-10
        else:
            assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?'
            fnirs, atol = 'fnirs_cw_amplitude', 1e-10
        picks = pick_types(other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs)
        col_names = [other_raw.ch_names[pick] for pick in picks]
        proj = np.ones((1, len(picks)))
        proj /= np.sqrt(proj.shape[1])
        proj = Projection(data=dict(data=proj,
                                    nrow=1,
                                    row_names=None,
                                    col_names=col_names,
                                    ncol=len(picks)),
                          active=False)
        assert len(other_raw.info['projs']) == 0
        other_raw.add_proj(proj)
        assert len(other_raw.info['projs']) == 1
        # Orders of projector application, data loading, and reordering
        # equivalent:
        # 1. load->apply->get
        data_load_apply_get = \
            other_raw.copy().load_data().apply_proj().get_data(picks)
        # 2. apply->get (and don't allow apply->pick)
        apply = other_raw.copy().apply_proj()
        data_apply_get = apply.get_data(picks)
        data_apply_get_0 = apply.get_data(picks[0])[0]
        with pytest.raises(RuntimeError, match='loaded'):
            apply.copy().pick(picks[0]).get_data()
        # 3. apply->load->get
        data_apply_load_get = apply.copy().load_data().get_data(picks)
        data_apply_load_get_0, data_apply_load_get_1 = \
            apply.copy().load_data().pick(picks[:2]).get_data()
        # 4. reorder->apply->load->get
        all_picks = np.arange(len(other_raw.ch_names))
        reord = np.concatenate(
            (picks[1::2], picks[0::2], np.setdiff1d(all_picks, picks)))
        rev = np.argsort(reord)
        assert_array_equal(reord[rev], all_picks)
        assert_array_equal(rev[reord], all_picks)
        reorder = other_raw.copy().pick(reord)
        assert reorder.ch_names == [other_raw.ch_names[r] for r in reord]
        assert reorder.ch_names[0] == other_raw.ch_names[picks[1]]
        assert_allclose(reorder.get_data([0]), other_raw.get_data(picks[1]))
        reorder_apply = reorder.copy().apply_proj()
        assert reorder_apply.ch_names == reorder.ch_names
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert_allclose(reorder_apply.get_data([0]),
                        apply.get_data(picks[1]),
                        atol=1e-18)
        data_reorder_apply_load_get = \
            reorder_apply.load_data().get_data(rev[:len(picks)])
        data_reorder_apply_load_get_1 = \
            reorder_apply.copy().load_data().pick([0]).get_data()[0]
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert (data_load_apply_get.shape == data_apply_get.shape ==
                data_apply_load_get.shape == data_reorder_apply_load_get.shape)
        del apply
        # first check that our data are (probably) in the right units
        data = data_load_apply_get.copy()
        data = data - np.mean(data, axis=1, keepdims=True)  # can be offsets
        np.abs(data, out=data)
        if test_scaling:
            maxval = atol * 1e16
            assert_array_less(data, maxval)
            minval = atol * 1e6
            assert_array_less(minval, np.median(data))
        else:
            atol = 1e-7 * np.median(data)  # 1e-7 * MAD
        # ranks should all be reduced by 1
        if test_rank == 'less':
            cmp = np.less
        elif test_rank is False:
            cmp = None
        else:  # anything else is like True or 'equal'
            assert test_rank is True or test_rank == 'equal', test_rank
            cmp = np.equal
        rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get)
        rank_apply_get = np.linalg.matrix_rank(data_apply_get)
        rank_apply_load_get = np.linalg.matrix_rank(data_apply_load_get)
        if cmp is not None:
            assert cmp(rank_load_apply_get, len(col_names) - 1)
            assert cmp(rank_apply_get, len(col_names) - 1)
            assert cmp(rank_apply_load_get, len(col_names) - 1)
        # and they should all match
        t_kw = dict(atol=atol,
                    err_msg='before != after, likely _mult_cal_one prob')
        assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw)
        assert_allclose(data_apply_load_get_1, data_reorder_apply_load_get_1,
                        **t_kw)
        assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_get, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw)
        if 'eeg' in raw:
            other_raw.del_proj()
            direct = \
                other_raw.copy().load_data().set_eeg_reference().get_data()
            other_raw.set_eeg_reference(projection=True)
            assert len(other_raw.info['projs']) == 1
            this_proj = other_raw.info['projs'][0]['data']
            assert this_proj['col_names'] == col_names
            assert this_proj['data'].shape == proj['data']['data'].shape
            assert_allclose(np.linalg.norm(proj['data']['data']),
                            1.,
                            atol=1e-6)
            assert_allclose(np.linalg.norm(this_proj['data']), 1., atol=1e-6)
            assert_allclose(this_proj['data'], proj['data']['data'])
            proj = other_raw.apply_proj().get_data()
            assert_allclose(proj[picks], data_load_apply_get, atol=1e-10)
            assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg'])
    else:
        raw = reader(**kwargs)
    n_samp = len(raw.times)
    assert_named_constants(raw.info)
    # smoke test for gh #9743
    ids = [id(ch['loc']) for ch in raw.info['chs']]
    assert len(set(ids)) == len(ids)

    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    data_max = full_data.max()
    data_min = full_data.min()
    # these limits could be relaxed if we actually find data with
    # huge values (in SI units)
    assert data_max < 1e5
    assert data_min > -1e5
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)

    # gh-5604
    meas_date = raw.info['meas_date']
    assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))

    # test repr_html
    assert 'Good channels' in raw.info._repr_html_()

    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)

    # Test saving with an incorrect extension
    out_fname_h5 = op.join(tempdir, 'test_raw.h5')
    with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'):
        raw.save(out_fname_h5)

    raw3 = read_raw_fif(out_fname)
    assert_named_constants(raw3.info)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_allclose(raw.times, raw3.times, atol=1e-6, rtol=1e-6)

    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])

    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']

    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]

    expected_bad_boundary_onset = raw._last_time

    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)

    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])

    assert isinstance(raw.annotations, Annotations)

    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name

    # Test picking with and without preload
    if test_preloading:
        preload_kwargs = (dict(preload=True), dict(preload=False))
    else:
        preload_kwargs = (dict(), )
    n_ch = len(raw.ch_names)
    picks = rng.permutation(n_ch)
    for preload_kwarg in preload_kwargs:
        these_kwargs = kwargs.copy()
        these_kwargs.update(preload_kwarg)
        # don't use the same filename or it could create problems
        if isinstance(these_kwargs.get('preload', None), str) and \
                op.isfile(these_kwargs['preload']):
            these_kwargs['preload'] += '-1'
        whole_raw = reader(**these_kwargs)
        print(whole_raw)  # __repr__
        assert n_ch >= 2
        picks_1 = picks[:n_ch // 2]
        picks_2 = picks[n_ch // 2:]
        raw_1 = whole_raw.copy().pick(picks_1)
        raw_2 = whole_raw.copy().pick(picks_2)
        data, times = whole_raw[:]
        data_1, times_1 = raw_1[:]
        data_2, times_2 = raw_2[:]
        assert_array_equal(times, times_1)
        assert_array_equal(data[picks_1], data_1)
        assert_array_equal(
            times,
            times_2,
        )
        assert_array_equal(data[picks_2], data_2)

    # Make sure that writing info to h5 format works
    # (all fields should be compatible)
    if check_version('h5io'):
        read_hdf5, write_hdf5 = _import_h5io_funcs()
        fname_h5 = op.join(tempdir, 'info.h5')
        with _writing_info_hdf5(raw.info):
            write_hdf5(fname_h5, raw.info)
        new_info = Info(read_hdf5(fname_h5))
        assert object_diff(new_info, raw.info) == ''

    # Make sure that changing directory does not break anything
    if test_preloading:
        these_kwargs = kwargs.copy()
        key = None
        for key in (
                'fname',
                'input_fname',  # artemis123
                'vhdr_fname',  # BV
                'pdf_fname',  # BTi
                'directory',  # CTF
                'filename',  # nedf
        ):
            try:
                fname = kwargs[key]
            except KeyError:
                key = None
            else:
                break
        # len(kwargs) == 0 for the fake arange reader
        if len(kwargs):
            assert key is not None, sorted(kwargs.keys())
            dirname = op.dirname(fname)
            these_kwargs[key] = op.basename(fname)
            these_kwargs['preload'] = False
            orig_dir = os.getcwd()
            try:
                os.chdir(dirname)
                raw_chdir = reader(**these_kwargs)
            finally:
                os.chdir(orig_dir)
            raw_chdir.load_data()

    # make sure that cropping works (with first_samp shift)
    if n_samp >= 50:  # we crop to this number of samples below
        for t_prop in (0., 0.5):
            _test_raw_crop(reader, t_prop, kwargs)
            if test_preloading:
                use_kwargs = kwargs.copy()
                use_kwargs['preload'] = True
                _test_raw_crop(reader, t_prop, use_kwargs)

    return raw
Example #32
def test_mutual_information():
    """ Test the function which returns the mutual information in two
    partitions

    XXX - This test is currently incomplete - it only checks the most basic
    case of MI(x, x)==1, but doesn't do any non-trivial checks.
    """

    # nnod_mod, av_degrees, nmods
    networks = [[4, [2, 3], [2, 4, 6]], [8, [4, 6], [4, 6, 8]],
                [40, [20], [2]]]

    for nnod_mod, av_degrees, nmods in networks:
        for nmod in nmods:
            nnod = nnod_mod * nmod
            for av_degree in av_degrees:
                #make a graph object
                g = mod.random_modular_graph(nnod, nmod, av_degree)

                #Compute the number of nodes per module
                nnod_mod = nnod // nmod
                #Make a "correct" partition for the graph
                ppart = mod.perfect_partition(nmod, nnod_mod)

                #graph_out, mod_array =mod.simulated_annealing(g, temperature =
                #temperature,temp_scaling = temp_scaling, tmin=tmin)

                #test the perfect case for now: two of the same partition
                #returns 1
                mi_orig = mod.mutual_information(ppart, ppart)
                npt.assert_equal(mi_orig, 1)

                #move one node and test that mutual_information comes out
                #correctly
                graph_partition = mod.GraphPartition(g, ppart)
                graph_partition.node_update(0, 0, 1)

                mi = mod.mutual_information(ppart, graph_partition.index)
                npt.assert_array_less(mi, mi_orig)
                ## NOTE: CORRECTNESS NOT TESTED YET

                #merge modules and check that mutual information comes out
                #correctly/lower
                graph_partition2 = mod.GraphPartition(g, ppart)
                merged_module, e_new, a_new, d, t, m1, m2, x = graph_partition2.compute_module_merge(
                    0, 1)
                graph_partition2.apply_module_merge(m1, m2, merged_module,
                                                    e_new, a_new)
                mi2 = mod.mutual_information(ppart, graph_partition2.index)
                npt.assert_array_less(mi2, mi_orig)
                ## NOTE: CORRECTNESS NOT TESTED YET

                #split modules and check that mutual information comes out
                #correctly/lower
                graph_partition3 = mod.GraphPartition(g, ppart)
                n1 = set(list(graph_partition3.index[0])[::2])
                n2 = set(list(graph_partition3.index[0])[1::2])

                (split_modules, e_new, a_new, d, t, m, n1,
                 n2) = graph_partition3.compute_module_split(0, n1, n2)
                graph_partition3.apply_module_split(m, n1, n2, split_modules,
                                                    e_new, a_new)
                mi3 = mod.mutual_information(ppart, graph_partition3.index)
                npt.assert_array_less(mi3, mi_orig)
Example #33
def _assert_close_in_norm(x, y, rtol, size, rdt):
    # helper function for testing
    err_msg = "size: %s  rdt: %s" % (size, rdt)
    assert_array_less(np.linalg.norm(x - y), rtol * np.linalg.norm(x), err_msg)
Example #34
def test_MassBalance():
    # %%
    # set up a 15x15 grid with one open outlet node and low initial elevations.
    nr = 15
    nc = 15
    mg = RasterModelGrid((nr, nc), xy_spacing=10.0)

    z = mg.add_zeros("topographic__elevation", at="node")
    br = mg.add_zeros("bedrock__elevation", at="node")
    soil = mg.add_zeros("soil__depth", at="node")

    mg["node"]["topographic__elevation"] += (
        mg.node_y / 100000 + mg.node_x / 100000 +
        np.random.rand(len(mg.node_y)) / 10000)
    mg.set_closed_boundaries_at_grid_edges(
        bottom_is_closed=True,
        left_is_closed=True,
        right_is_closed=True,
        top_is_closed=True,
    )
    mg.set_watershed_boundary_condition_outlet_id(
        0, mg["node"]["topographic__elevation"], -9999.0)
    soil[:] += 0.0  # initial condition of no soil depth.
    br[:] = z[:]
    z[:] += soil[:]

    # Create a D8 flow handler
    # fa = PriorityFloodFlowRouter(mg, surface="topographic__elevation", flow_metric = 'D8',suppress_out=True)
    # fa.run_one_step()

    # Create a D8 flow handler
    fa = FlowAccumulator(mg,
                         flow_director="D8",
                         depression_finder="DepressionFinderAndRouter")

    # Parameter values for detachment-limited test
    K_br = 0.002
    K_sed = 0.002
    U = 0.0001
    dt = 10.0
    F_f = 0.2  # fraction of fines permanently lost from the bed
    m_sp = 0.5
    n_sp = 1.0
    v_s = 0.25
    H_star = 0.1

    # Instantiate the Space component...
    sp = SpaceLargeScaleEroder(
        mg,
        K_sed=K_sed,
        K_br=K_br,
        F_f=F_f,
        phi=0.0,
        H_star=H_star,
        v_s=v_s,
        m_sp=m_sp,
        n_sp=n_sp,
        sp_crit_sed=0,
        sp_crit_br=0,
    )
    # Get values before run
    z = mg.at_node["topographic__elevation"]
    br = mg.at_node["bedrock__elevation"]
    H = mg.at_node["soil__depth"]
    cores = mg.core_nodes
    area = mg.cell_area_at_node
    # ... and run it to steady state (10000x1-year timesteps).
    for _ in range(10000):
        fa.run_one_step()
        soil_B = cp.deepcopy(H)
        bed_B = cp.deepcopy(br)
        vol_SSY_riv, V_leaving_riv = sp.run_one_step(dt=dt)
        diff_MB = (np.sum((bed_B[cores] - br[cores]) * area[cores]) + np.sum(
            (soil_B[cores] - H[cores]) * area[cores]) * (1 - sp._phi) -
                   vol_SSY_riv * dt - V_leaving_riv)

        br[mg.core_nodes] += U * dt  # m
        soil[0] = 0.0  # enforce 0 soil depth at boundary to keep lowering steady
        z[:] = br[:] + soil[:]

        # Test Every iteration
        testing.assert_array_almost_equal(
            z[cores],
            br[cores] + H[cores],
            decimal=5,
            err_msg=
            "Topography does not equal sum of bedrock and soil! Decrease timestep",
            verbose=True,
        )
        testing.assert_array_less(
            abs(diff_MB),
            1e-8 * mg.number_of_nodes,
            err_msg=
            "Mass balance error SpaceLargeScaleEroder! Try to resolve by becreasing timestep",
            verbose=True,
        )
Example #35
def run_and_check(n_jobs=0, delete=True, additional_args=[]):
    models_dir = tempfile.mkdtemp(prefix="mhcflurry-test-models")
    hyperparameters_filename = os.path.join(models_dir, "hyperparameters.yaml")
    with open(hyperparameters_filename, "w") as fd:
        json.dump(HYPERPARAMETERS_LIST, fd)

    data_df = pandas.read_csv(
        get_path("data_curated", "curated_training_data.affinity.csv.bz2"))
    selected_data_df = data_df.loc[data_df.allele.str.startswith("HLA-A")]
    selected_data_df.to_csv(os.path.join(models_dir, "_train_data.csv"),
                            index=False)

    args = [
        "mhcflurry-class1-train-pan-allele-models",
        "--data",
        os.path.join(models_dir, "_train_data.csv"),
        "--allele-sequences",
        get_path("allele_sequences", "allele_sequences.csv"),
        "--hyperparameters",
        hyperparameters_filename,
        "--out-models-dir",
        models_dir,
        "--num-jobs",
        str(n_jobs),
        "--num-folds",
        "2",
        "--verbosity",
        "1",
    ] + additional_args
    print("Running with args: %s" % args)
    subprocess.check_call(args)

    # Run model selection
    models_dir_selected = tempfile.mkdtemp(
        prefix="mhcflurry-test-models-selected")
    args = [
        "mhcflurry-class1-select-pan-allele-models",
        "--data",
        os.path.join(models_dir, "train_data.csv.bz2"),
        "--models-dir",
        models_dir,
        "--out-models-dir",
        models_dir_selected,
        "--max-models",
        "1",
        "--num-jobs",
        str(n_jobs),
    ] + additional_args
    print("Running with args: %s" % args)
    subprocess.check_call(args)

    result = Class1AffinityPredictor.load(models_dir_selected,
                                          optimization_level=0)
    assert_equal(len(result.neural_networks), 2)
    predictions = result.predict(peptides=["SLYNTVATL"],
                                 alleles=["HLA-A*02:01"])
    assert_equal(predictions.shape, (1, ))
    assert_array_less(predictions, 1000)

    if delete:
        print("Deleting: %s" % models_dir)
        shutil.rmtree(models_dir)
        shutil.rmtree(models_dir_selected)
Example #36
0
def test_nfw_mass_dependence():
    masses = np.array([1e13, 1e14, 1e15])
    for i in range(len(masses) - 1):
        xi1 = xi.xi_nfw_at_r(ra, masses[i], conc, Omega_m)
        xi2 = xi.xi_nfw_at_r(ra, masses[i + 1], conc, Omega_m)
        npt.assert_array_less(xi1, xi2)
Example #37
0
    def test_spectral_solver_known_solution_gkb_problem_1(self):
        problem = skp.ProcrustesProblem((100, 100, 10, 10), problemnumber=1)
        mysolver = skp.GKBSolver()
        result = mysolver.solve(problem)
        assert_array_less(result.blocksteps, 7)
Example #38
0
    def test_spectral_solver_known_solution_spg_problem_1(self):
        problem = skp.ProcrustesProblem((100, 100, 10, 10), problemnumber=1)
        mysolver = skp.SPGSolver()
        result = mysolver.solve(problem)
        assert_array_less(result.nbiter, 8)
Example #39
0
def test_reporting_iir(ftype, btype, order, output):
    """Test IIR filter reporting."""
    fs = 1000.
    l_freq = 1. if btype == 'bandpass' else None
    iir_params = dict(ftype=ftype, order=order, output=output)
    rs = 20 if order == 1 else 80
    if ftype == 'ellip':
        iir_params['rp'] = 3  # dB
        iir_params['rs'] = rs  # attenuation
        pass_tol = np.log10(iir_params['rp']) + 0.01
    else:
        pass_tol = 0.2
    with catch_logging() as log:
        x = create_filter(None,
                          fs,
                          l_freq,
                          40.,
                          method='iir',
                          iir_params=iir_params,
                          verbose=True)
    order_eff = order * (1 + (btype == 'bandpass'))
    if output == 'ba':
        assert len(x['b']) == order_eff + 1
    log = log.getvalue()
    keys = [
        'IIR',
        'zero-phase',
        'two-pass forward and reverse',
        'non-causal',
        btype,
        ftype,
        'Filter order %d' % (order_eff * 2, ),
        'Cutoff ' if btype == 'lowpass' else 'Cutoffs ',
    ]
    dB_decade = -27.74
    if ftype == 'ellip':
        dB_cutoff = -6.0
    elif order == 1 or ftype == 'butter':
        dB_cutoff = -6.02
    else:
        assert ftype == 'bessel'
        assert order == 4
        dB_cutoff = -15.16
    if btype == 'lowpass':
        keys += ['%0.2f dB' % (dB_cutoff, )]
    for key in keys:
        assert key.lower() in log.lower()
    # Verify some of the filter properties
    if output == 'ba':
        w, h = freqz(x['b'], x['a'], worN=10000)
    else:
        w, h = _sosfreqz(x['sos'], worN=10000)
    w *= fs / (2 * np.pi)
    h = np.abs(h)
    # passband
    passes = [np.argmin(np.abs(w - 20))]
    # stopband
    decades = [np.argmin(np.abs(w - 400.))]  # one decade
    # transition
    edges = [np.argmin(np.abs(w - 40.))]
    # put these where they belong based on filter type
    assert w[0] == 0.
    idx_0p1 = np.argmin(np.abs(w - 0.1))
    idx_1 = np.argmin(np.abs(w - 1.))
    if btype == 'bandpass':
        edges += [idx_1]
        decades += [idx_0p1]
    else:
        passes += [idx_0p1, idx_1]

    edge_val = 10**(dB_cutoff / 40.)
    assert_allclose(h[edges], edge_val, atol=0.01)
    assert_allclose(h[passes], 1., atol=pass_tol)
    if ftype == 'butter' and btype == 'lowpass':
        attenuation = dB_decade * order
        assert_allclose(h[decades], 10**(attenuation / 20.), rtol=0.01)
    elif ftype == 'ellip':
        assert_array_less(h[decades], 10**(-rs / 20))
Example #40
0
def assert_quad(value_and_err, tabled_value, errTol=1.5e-8):
    value, err = value_and_err
    assert_allclose(value, tabled_value, atol=err, rtol=0)
    if errTol is not None:
        assert_array_less(err, errTol)
Example #41
0
def test_lcmv():
    """Test LCMV with evoked data and single trials."""
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()

    for fwd in [forward, forward_vol]:
        filters = make_lcmv(evoked.info,
                            fwd,
                            data_cov,
                            reg=0.01,
                            noise_cov=noise_cov)
        stc = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc.crop(0.02, None)

        stc_pow = np.sum(np.abs(stc.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        assert 0.09 < tmax < 0.12, tmax
        assert 0.9 < np.max(max_stc) < 3., np.max(max_stc)

        if fwd is forward:
            # Test picking normal orientation (surface source space only)
            filters = make_lcmv(evoked.info,
                                forward_surf_ori,
                                data_cov,
                                reg=0.01,
                                noise_cov=noise_cov,
                                pick_ori='normal')
            stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed')
            stc_normal.crop(0.02, None)

            stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
            idx = np.argmax(stc_pow)
            max_stc = stc_normal.data[idx]
            tmax = stc_normal.times[np.argmax(max_stc)]

            assert 0.04 < tmax < 0.12, tmax
            assert 0.4 < np.max(max_stc) < 2., np.max(max_stc)

            # The amplitude of normal orientation results should always be
            # smaller than free orientation results
            assert (np.abs(stc_normal.data) <= stc.data).all()

        # Test picking source orientation maximizing output source power
        filters = make_lcmv(evoked.info,
                            fwd,
                            data_cov,
                            reg=0.01,
                            noise_cov=noise_cov,
                            pick_ori='max-power')
        stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc_max_power.crop(0.02, None)
        stc_pow = np.sum(np.abs(stc_max_power.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = np.abs(stc_max_power.data[idx])
        tmax = stc.times[np.argmax(max_stc)]

        assert 0.08 < tmax < 0.12, tmax
        assert 0.8 < np.max(max_stc) < 3., np.max(max_stc)

        stc_max_power.data[:, :] = np.abs(stc_max_power.data)

        if fwd is forward:
            # Maximum output source power orientation results should be
            # similar to free orientation results in areas with channel
            # coverage
            label = mne.read_label(fname_label)
            mean_stc = stc.extract_label_time_course(label,
                                                     fwd['src'],
                                                     mode='mean')
            mean_stc_max_pow = \
                stc_max_power.extract_label_time_course(label, fwd['src'],
                                                        mode='mean')
            assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 0.6)

        # Test NAI weight normalization:
        filters = make_lcmv(evoked.info,
                            fwd,
                            data_cov,
                            reg=0.01,
                            noise_cov=noise_cov,
                            pick_ori='max-power',
                            weight_norm='nai')
        stc_nai = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc_nai.crop(0.02, None)

        # Test whether unit-noise-gain solution is a scaled version of NAI
        pearsoncorr = np.corrcoef(np.concatenate(np.abs(stc_nai.data)),
                                  np.concatenate(stc_max_power.data))
        assert_almost_equal(pearsoncorr[0, 1], 1.)

    # Test sphere head model with unit-noise gain beamformer and orientation
    # selection and rank reduction of the leadfield
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None,
                                        pos=15.,
                                        mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None,
                                        mindist=5.0,
                                        exclude=2.0)

    fwd_sphere = mne.make_forward_solution(evoked.info,
                                           trans=None,
                                           src=src,
                                           bem=sphere,
                                           eeg=False,
                                           meg=True)

    # Test that we get an error if not reducing rank
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  fwd_sphere,
                  data_cov,
                  reg=0.1,
                  noise_cov=noise_cov,
                  weight_norm='unit-noise-gain',
                  pick_ori='max-power',
                  reduce_rank=False)

    # Now let's reduce it
    filters = make_lcmv(evoked.info,
                        fwd_sphere,
                        data_cov,
                        reg=0.1,
                        noise_cov=noise_cov,
                        weight_norm='unit-noise-gain',
                        pick_ori='max-power',
                        reduce_rank=True)
    stc_sphere = apply_lcmv(evoked, filters, max_ori_out='signed')
    stc_sphere = np.abs(stc_sphere)
    stc_sphere.crop(0.02, None)

    stc_pow = np.sum(stc_sphere.data, axis=1)
    idx = np.argmax(stc_pow)
    max_stc = stc_sphere.data[idx]
    tmax = stc_sphere.times[np.argmax(max_stc)]

    assert 0.08 < tmax < 0.15, tmax
    assert 0.4 < np.max(max_stc) < 2., np.max(max_stc)

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # Test if fixed forward operator is detected when picking normal or
    # max-power orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_fixed,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_fixed,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='max-power')

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')

    # Test if volume forward operator is detected when picking normal
    # orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')

    # Test if a missing noise covariance matrix is detected when more than
    # one channel type is present in the data
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov=data_cov,
                  reg=0.01,
                  noise_cov=None,
                  pick_ori='max-power')

    # Test if not-yet-implemented orientation selections raise an error with
    # the neural activity index
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_surf_ori,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal',
                  weight_norm='nai')
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori=None,
                  weight_norm='nai')

    # Test that combining max-power source orientation with no weight
    # normalization raises an error
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='max-power',
                  weight_norm=None)

    # Test if wrong channel selection is detected in application of filter
    evoked_ch = deepcopy(evoked)
    evoked_ch.pick_channels(evoked_ch.ch_names[1:])
    filters = make_lcmv(evoked.info,
                        forward_vol,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    pytest.raises(ValueError,
                  apply_lcmv,
                  evoked_ch,
                  filters,
                  max_ori_out='signed')

    # Test if discrepancies in channel selection of data and fwd model are
    # handled correctly in apply_lcmv
    # make filter with data where first channel was removed
    filters = make_lcmv(evoked_ch.info,
                        forward_vol,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    # applying that filter to the full data set should automatically exclude
    # this channel from the data
    # also test that no warnings are thrown, guarding against the
    # "src should not be None" warning
    with pytest.warns(None) as w:
        stc = apply_lcmv(evoked, filters, max_ori_out='signed')
    assert len(w) == 0
    # the result should be equal to applying this filter to a dataset without
    # this channel:
    stc_ch = apply_lcmv(evoked_ch, filters, max_ori_out='signed')
    assert_array_almost_equal(stc.data, stc_ch.data)

    # Test if non-matching SSP projection is detected in application of filter
    raw_proj = deepcopy(raw)
    raw_proj.del_proj()
    pytest.raises(ValueError,
                  apply_lcmv_raw,
                  raw_proj,
                  filters,
                  max_ori_out='signed')

    # Test if setting reduce_rank to True returns a NotImplementedError
    # when no orientation selection is done or pick_ori='normal'
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  noise_cov=noise_cov,
                  pick_ori=None,
                  weight_norm='nai',
                  reduce_rank=True)
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_surf_ori,
                  data_cov,
                  noise_cov=noise_cov,
                  pick_ori='normal',
                  weight_norm='nai',
                  reduce_rank=True)

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # check whether a filters object without src_type throws expected warning
    del filters['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning,
                      match='spatial filter does not contain '
                      'src_type'):
        apply_lcmv(evoked, filters, max_ori_out='signed')

    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    stcs = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')
    stcs_ = apply_lcmv_epochs(epochs,
                              filters,
                              return_generator=True,
                              max_ori_out='signed')
    assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)

    epochs.drop_bad()
    assert (len(epochs.events) == len(stcs))

    # average the single trial estimates
    stc_avg = np.zeros_like(stcs[0].data)
    for this_stc in stcs:
        stc_avg += this_stc.data
    stc_avg /= len(stcs)

    # compare it to the solution using evoked with fixed orientation
    filters = make_lcmv(evoked.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    stc_fixed = apply_lcmv(evoked, filters, max_ori_out='signed')
    assert_array_almost_equal(stc_avg, stc_fixed.data)

    # use a label so we have few source vertices and delayed computation is
    # not used
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov,
                        label=label)
    stcs_label = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')

    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
Example #42
0
def test_binom_rejection_interval():
    # consistency check with binom_test
    # some code duplication but limit checks are different
    alpha = 0.05
    nobs = 200
    prop = 12./20
    alternative = 'smaller'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_upp, nobs)
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    alternative = 'larger'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_low, 0)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    alternative = 'two-sided'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    # upper limit of the rejection region
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    # lower limit of the rejection region
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
Example #43
0
def positive_eig(kernel, X):
    """Assert true if the calculated kernel matrix is valid."""
    K = kernel.fit_transform(X)
    min_eig = np.real(np.min(np.linalg.eig(K)[0]))
    assert_array_less(default_eigvalue_precision, min_eig)
Example #44
0
    def _assert_diff_less(self, array1, array2, threshold):
        npt.assert_array_less(np.abs(array1 - array2), threshold)
Example #45
0
def test_make_lcmv(tmpdir, reg, proj):
    """Test LCMV with evoked data and single trials."""
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj)

    for fwd in [forward, forward_vol]:
        filters = make_lcmv(evoked.info,
                            fwd,
                            data_cov,
                            reg=reg,
                            noise_cov=noise_cov)
        stc = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc.crop(0.02, None)

        stc_pow = np.sum(np.abs(stc.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc.data[idx]
        tmax = stc.times[np.argmax(max_stc)]

        assert 0.08 < tmax < 0.14, tmax
        assert 0.9 < np.max(max_stc) < 3., np.max(max_stc)

        if fwd is forward:
            # Test picking normal orientation (surface source space only).
            filters = make_lcmv(evoked.info,
                                forward_surf_ori,
                                data_cov,
                                reg=reg,
                                noise_cov=noise_cov,
                                pick_ori='normal',
                                weight_norm=None)
            stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed')
            stc_normal.crop(0.02, None)

            stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
            idx = np.argmax(stc_pow)
            max_stc = stc_normal.data[idx]
            tmax = stc_normal.times[np.argmax(max_stc)]

            lower = 0.04 if proj else 0.025
            assert lower < tmax < 0.14, tmax
            lower = 3e-7 if proj else 2e-7
            assert lower < np.max(max_stc) < 3e-6, np.max(max_stc)

            # No weight normalization was applied, so the amplitude of normal
            # orientation results should always be smaller than free
            # orientation results.
            assert (np.abs(stc_normal.data) <= stc.data).all()

        # Test picking source orientation maximizing output source power
        filters = make_lcmv(evoked.info,
                            fwd,
                            data_cov,
                            reg=reg,
                            noise_cov=noise_cov,
                            pick_ori='max-power')
        stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc_max_power.crop(0.02, None)
        stc_pow = np.sum(np.abs(stc_max_power.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = np.abs(stc_max_power.data[idx])
        tmax = stc.times[np.argmax(max_stc)]

        lower = 0.08 if proj else 0.04
        assert lower < tmax < 0.12, tmax
        assert 0.8 < np.max(max_stc) < 3., np.max(max_stc)

        stc_max_power.data[:, :] = np.abs(stc_max_power.data)

        if fwd is forward:
            # Maximum output source power orientation results should be
            # similar to free orientation results in areas with channel
            # coverage
            label = mne.read_label(fname_label)
            mean_stc = stc.extract_label_time_course(label,
                                                     fwd['src'],
                                                     mode='mean')
            mean_stc_max_pow = \
                stc_max_power.extract_label_time_course(label, fwd['src'],
                                                        mode='mean')
            assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0)

        # Test NAI weight normalization:
        filters = make_lcmv(evoked.info,
                            fwd,
                            data_cov,
                            reg=reg,
                            noise_cov=noise_cov,
                            pick_ori='max-power',
                            weight_norm='nai')
        stc_nai = apply_lcmv(evoked, filters, max_ori_out='signed')
        stc_nai.crop(0.02, None)

        # Test whether unit-noise-gain solution is a scaled version of NAI
        pearsoncorr = np.corrcoef(np.concatenate(np.abs(stc_nai.data)),
                                  np.concatenate(stc_max_power.data))
        assert_almost_equal(pearsoncorr[0, 1], 1.)

    # Test sphere head model with unit-noise gain beamformer and orientation
    # selection and rank reduction of the leadfield
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None,
                                        pos=15.,
                                        mri=None,
                                        sphere=(0.0, 0.0, 0.0, 80.0),
                                        bem=None,
                                        mindist=5.0,
                                        exclude=2.0)

    fwd_sphere = mne.make_forward_solution(evoked.info,
                                           trans=None,
                                           src=src,
                                           bem=sphere,
                                           eeg=False,
                                           meg=True)

    # Test that we get an error if not reducing rank
    with pytest.raises(ValueError):  # Singular matrix or complex spectrum
        make_lcmv(evoked.info,
                  fwd_sphere,
                  data_cov,
                  reg=0.1,
                  noise_cov=noise_cov,
                  weight_norm='unit-noise-gain',
                  pick_ori='max-power',
                  reduce_rank=False,
                  rank='full')

    # Now let's reduce it
    filters = make_lcmv(evoked.info,
                        fwd_sphere,
                        data_cov,
                        reg=0.1,
                        noise_cov=noise_cov,
                        weight_norm='unit-noise-gain',
                        pick_ori='max-power',
                        reduce_rank=True)
    stc_sphere = apply_lcmv(evoked, filters, max_ori_out='signed')
    stc_sphere = np.abs(stc_sphere)
    stc_sphere.crop(0.02, None)

    stc_pow = np.sum(stc_sphere.data, axis=1)
    idx = np.argmax(stc_pow)
    max_stc = stc_sphere.data[idx]
    tmax = stc_sphere.times[np.argmax(max_stc)]

    lower = 0.08 if proj else 0.04
    assert lower < tmax < 0.15, tmax
    assert 0.4 < np.max(max_stc) < 2., np.max(max_stc)

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # __repr__
    assert len(evoked.ch_names) == 22
    assert len(evoked.info['projs']) == (4 if proj else 0)
    assert len(evoked.info['bads']) == 2
    rank = 17 if proj else 20
    assert 'LCMV' in repr(filters)
    assert 'unknown subject' not in repr(filters)
    assert '484' in repr(filters)
    assert '20' in repr(filters)
    assert 'rank %s' % rank in repr(filters)

    # I/O
    fname = op.join(str(tmpdir), 'filters.h5')
    with pytest.warns(RuntimeWarning, match='-lcmv.h5'):
        filters.save(fname)
    filters_read = read_beamformer(fname)
    assert isinstance(filters, Beamformer)
    assert isinstance(filters_read, Beamformer)
    # deal with object_diff strictness
    filters_read['rank'] = int(filters_read['rank'])
    filters['rank'] = int(filters['rank'])
    assert object_diff(filters, filters_read) == ''

    # Test if fixed forward operator is detected when picking normal or
    # max-power orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_fixed,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_fixed,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='max-power')

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')

    # Test if volume forward operator is detected when picking normal
    # orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')

    # Test if a missing noise covariance matrix is detected when more than
    # one channel type is present in the data
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov=data_cov,
                  reg=0.01,
                  noise_cov=None,
                  pick_ori='max-power')

    # Test if wrong channel selection is detected in application of filter
    evoked_ch = deepcopy(evoked)
    evoked_ch.pick_channels(evoked_ch.ch_names[1:])
    filters = make_lcmv(evoked.info,
                        forward_vol,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    pytest.raises(ValueError,
                  apply_lcmv,
                  evoked_ch,
                  filters,
                  max_ori_out='signed')

    # Test if discrepancies in channel selection of data and fwd model are
    # handled correctly in apply_lcmv
    # make filter with data where first channel was removed
    filters = make_lcmv(evoked_ch.info,
                        forward_vol,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    # applying that filter to the full data set should automatically exclude
    # this channel from the data
    # also test that no warnings are thrown, guarding against the
    # "src should not be None" warning
    with pytest.warns(None) as w:
        stc = apply_lcmv(evoked, filters, max_ori_out='signed')
    assert len(w) == 0
    # the result should be equal to applying this filter to a dataset without
    # this channel:
    stc_ch = apply_lcmv(evoked_ch, filters, max_ori_out='signed')
    assert_array_almost_equal(stc.data, stc_ch.data)

    # Test if non-matching SSP projection is detected in application of filter
    if proj:
        raw_proj = deepcopy(raw)
        raw_proj.del_proj()
        with pytest.raises(ValueError, match='do not match the projections'):
            apply_lcmv_raw(raw_proj, filters, max_ori_out='signed')

    # Test if setting reduce_rank to True returns a NotImplementedError
    # when no orientation selection is done or pick_ori='normal'
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  noise_cov=noise_cov,
                  pick_ori=None,
                  weight_norm='nai',
                  reduce_rank=True)
    pytest.raises(NotImplementedError,
                  make_lcmv,
                  evoked.info,
                  forward_surf_ori,
                  data_cov,
                  noise_cov=noise_cov,
                  pick_ori='normal',
                  weight_norm='nai',
                  reduce_rank=True)

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # check whether a filters object without src_type throws expected warning
    del filters['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning,
                      match='spatial filter does not contain '
                      'src_type'):
        apply_lcmv(evoked, filters, max_ori_out='signed')

    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    stcs = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')
    stcs_ = apply_lcmv_epochs(epochs,
                              filters,
                              return_generator=True,
                              max_ori_out='signed')
    assert_array_equal(stcs[0].data, next(stcs_).data)

    epochs.drop_bad()
    assert (len(epochs.events) == len(stcs))

    # average the single trial estimates
    stc_avg = np.zeros_like(stcs[0].data)
    for this_stc in stcs:
        stc_avg += this_stc.data
    stc_avg /= len(stcs)

    # compare it to the solution using evoked with fixed orientation
    filters = make_lcmv(evoked.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    stc_fixed = apply_lcmv(evoked, filters, max_ori_out='signed')
    assert_array_almost_equal(stc_avg, stc_fixed.data)

    # use a label so we have few source vertices and delayed computation is
    # not used
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov,
                        label=label)
    stcs_label = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')

    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)

    # Test condition where the filters weights are zero. There should not be
    # any divide-by-zero errors
    zero_cov = data_cov.copy()
    zero_cov['data'][:] = 0
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        zero_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    assert_array_equal(filters['weights'], 0)

    # Test condition where one channel type is picked
    # (avoid "grad data rank (13) did not match the noise rank (None)")
    data_cov_grad = mne.pick_channels_cov(data_cov, [
        ch_name
        for ch_name in epochs.info['ch_names'] if ch_name.endswith(('2', '3'))
    ])
    assert len(data_cov_grad['names']) > 4
    make_lcmv(epochs.info,
              forward_fixed,
              data_cov_grad,
              reg=0.01,
              noise_cov=noise_cov)
Example #46
0
    def testTetrahedron(self):
        myobj = frame.PlatformFrame(options=self.opt)
        myobj.node_mem2glob = {}
        myobj.node_glob2mem = {}
        myobj.compute(self.inputs, self.outputs)

        # Check NULLs and implied number of nodes / elements
        npt.assert_equal(self.outputs["platform_nodes"][4:, :], NULL)
        npt.assert_equal(self.outputs["platform_Fnode"][4:, :], NULL)
        npt.assert_equal(self.outputs["platform_Rnode"][4:], NULL)
        npt.assert_equal(self.outputs["platform_elem_n1"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_n2"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_D"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_t"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_A"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Asx"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Asy"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Ixx"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Iyy"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Izz"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_rho"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_E"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_G"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_sigma_y"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Px1"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Py1"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Pz1"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Px2"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Py2"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_Pz2"][6:], NULL)
        npt.assert_equal(self.outputs["platform_elem_qdyn"][6:], NULL)

        npt.assert_equal(
            self.outputs["platform_nodes"][:4, :],
            np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.5, 1.0, 0.0],
                      [1.0, 0.0, 0.0]]),
        )
        npt.assert_equal(
            self.outputs["platform_Fnode"][:4, :],
            1e2 * np.array([[0.0, 0.0, 2], [0.0, 0.0, 0.0], [0.0, 0.0, 2],
                            [0.0, 0.0, 2.0]]),
        )
        npt.assert_equal(self.outputs["platform_Rnode"][:4],
                         0.1 * np.r_[3, 5, 5, 4])
        npt.assert_equal(self.outputs["platform_elem_n1"][:6], np.r_[0, 3, 2,
                                                                     0, 3, 2])
        npt.assert_equal(self.outputs["platform_elem_n2"][:6], np.r_[3, 2, 0,
                                                                     1, 1, 1])
        npt.assert_equal(self.outputs["platform_elem_D"][:6], 2.0)
        npt.assert_equal(self.outputs["platform_elem_t"][:6], 0.1)
        npt.assert_equal(self.outputs["platform_elem_A"][:6],
                         0.5 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_Asx"][:6],
                         0.5 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_Asy"][:6],
                         0.5 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_Ixx"][:6],
                         2 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_Iyy"][:6],
                         2 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_Izz"][:6],
                         2 * np.arange(6) + 1)
        # npt.assert_equal(self.outputs["platform_elem_rho"][:6], 3 * np.arange(6)+1)
        npt.assert_equal(self.outputs["platform_elem_E"][:6],
                         3 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_G"][:6],
                         4 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_sigma_y"][:6],
                         5 * np.arange(6) + 1)
        npt.assert_equal(self.outputs["platform_elem_Px1"][:6], 1.0)
        npt.assert_equal(self.outputs["platform_elem_Py1"][:6], 2.0)
        npt.assert_equal(self.outputs["platform_elem_Pz1"][:6], 3.0)
        npt.assert_equal(self.outputs["platform_elem_Px2"][:6], 1.0)
        npt.assert_equal(self.outputs["platform_elem_Py2"][:6], 2.0)
        npt.assert_equal(self.outputs["platform_elem_Pz2"][:6], 3.0)
        npt.assert_equal(self.outputs["platform_elem_qdyn"][:6], 4.0)
        self.assertEqual(self.outputs["platform_displacement"], 6e1)
        centroid = np.array([0.375, 0.25, 0.25])
        R = np.zeros(6)
        for k in range(6):
            xy_mean = self.inputs[f"member{k}:nodes_xyz"][:2, :2].mean(axis=0)
            R[k] = np.sum((xy_mean - centroid[:2]) ** 2)

        npt.assert_equal(self.outputs["platform_center_of_buoyancy"], centroid)
        npt.assert_equal(self.outputs["platform_centroid"], centroid)
        npt.assert_equal(self.outputs["platform_center_of_mass"], centroid)
        self.assertEqual(self.outputs["platform_mass"], 6e3)
        self.assertEqual(self.outputs["platform_ballast_mass"], 6e2)
        self.assertEqual(self.outputs["platform_hull_mass"], 6e3 - 6e2)
        self.assertEqual(self.outputs["platform_cost"], 6 * 2e3)
        self.assertEqual(self.outputs["platform_Awater"], 30)
        self.assertEqual(self.outputs["platform_Iwater"], 6 * 15 + 5 * R.sum())
        npt.assert_equal(self.outputs["platform_added_mass"], 6 * np.arange(6))
        npt.assert_equal(self.outputs["platform_variable_capacity"],
                         10 + np.arange(6))
        npt.assert_array_less(1e2, self.outputs["platform_I_total"])
Example #47
0
    def test_expm_cond_smoke(self):
        np.random.seed(1234)
        for n in range(1, 4):
            A = np.random.randn(n, n)
            kappa = expm_cond(A)
            assert_array_less(0, kappa)
Example #48
0
def test_montage():
    """Test making montages."""
    tempdir = _TempDir()
    inputs = dict(
        sfp='FidNz 0       9.071585155     -2.359754454\n'
        'FidT9 -6.711765       0.040402876     -3.251600355\n'
        'very_very_very_long_name -5.831241498 -4.494821698  4.955347697\n'
        'Cz 0       0       8.899186843',
        csd=
        '// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa: E501
        '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa: E501
        'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa: E501
        'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000\n'  # noqa: E501
        'E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000\n'  # noqa: E501
        'E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022',  # noqa: E501
        mm_elc=
        '# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'  # noqa:E501
        'NumberPositions=    68\n'
        'Positions\n'
        '-86.0761 -19.9897 -47.9860\n'
        '85.7939 -20.0093 -48.0310\n'
        '0.0083 86.8110 -39.9830\n'
        '-86.0761 -24.9897 -67.9860\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        m_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    m\n'
        'NumberPositions=    68\nPositions\n-.0860761 -.0199897 -.0479860\n'  # noqa:E501
        '.0857939 -.0200093 -.0480310\n.0000083 .00868110 -.0399830\n'
        '.08 -.02 -.04\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        txt='Site  Theta  Phi\n'
        'Fp1  -92    -72\n'
        'Fp2   92     72\n'
        'very_very_very_long_name       -92     72\n'
        'O2        92    -90\n',
        elp='346\n'
        'EEG\t      F3\t -62.027\t -50.053\t      85\n'
        'EEG\t      Fz\t  45.608\t      90\t      85\n'
        'EEG\t      F4\t   62.01\t  50.103\t      85\n'
        'EEG\t      FCz\t   68.01\t  58.103\t      85\n',
        hpts='eeg Fp1 -95.0 -3. -3.\n'
        'eeg AF7 -1 -1 -3\n'
        'eeg A3 -2 -2 2\n'
        'eeg A 0 0 0',
    )
    # Get actual positions and save them for checking
    # csd comes from the string above, all others come from commit 2fa35d4
    poss = dict(
        sfp=[[0.0, 9.07159, -2.35975], [-6.71176, 0.0404, -3.2516],
             [-5.83124, -4.49482, 4.95535], [0.0, 0.0, 8.89919]],
        mm_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
                [1e-05, 0.08681, -0.03998], [-0.08608, -0.02499, -0.06799]],
        m_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
               [1e-05, 0.00868, -0.03998], [0.08, -0.02, -0.04]],
        txt=[[-26.25044, 80.79056, -2.96646], [26.25044, 80.79056, -2.96646],
             [-26.25044, -80.79056, -2.96646], [0.0, -84.94822, -2.96646]],
        elp=[[-48.20043, 57.55106, 39.86971], [0.0, 60.73848, 59.4629],
             [48.1426, 57.58403, 39.89198], [41.64599, 66.91489, 31.8278]],
        hpts=[[-95, -3, -3], [-1, -1., -3.], [-2, -2, 2.], [0, 0, 0]],
    )
    for key, text in inputs.items():
        kind = key.split('_')[-1]
        fname = op.join(tempdir, 'test.' + kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        if kind in ('sfp', 'txt'):
            assert_true('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 4)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (4, 3))
        assert_equal(montage.kind, 'test')
        if kind == 'csd':
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            poss['csd'] = np.c_[table['x'], table['y'], table['z']]
        if kind == 'elc':
            # Make sure points are reasonable distance from geometric centroid
            centroid = np.sum(montage.pos, axis=0) / montage.pos.shape[0]
            distance_from_centroid = np.apply_along_axis(
                np.linalg.norm, 1, montage.pos - centroid)
            assert_array_less(distance_from_centroid, 0.2)
            assert_array_less(0.01, distance_from_centroid)
        assert_array_almost_equal(poss[key], montage.pos, 4, err_msg=key)

    # Test reading in different letter case.
    ch_names = [
        "F3", "FZ", "F4", "FC3", "FCz", "FC4", "C3", "CZ", "C4", "CP3", "CPZ",
        "CP4", "P3", "PZ", "P4", "O1", "OZ", "O2"
    ]
    montage = read_montage('standard_1020', ch_names=ch_names)
    assert_array_equal(ch_names, montage.ch_names)

    # test transform
    input_strs = [
        """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """, """
    Fp1 -95.0 -31.0 -3.0
    AF7 -81 -59 -3
    AF3 -87 -41 28
    nasion -91 0 -42
    lpa 0 -91 -42
    rpa 0 91 -42
    """
    ]

    all_fiducials = [['2', '1', '3'], ['nasion', 'lpa', 'rpa']]

    kinds = ['test_fid.hpts', 'test_fid.sfp']

    for kind, fiducials, input_str in zip(kinds, all_fiducials, input_strs):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(input_str)
        montage = read_montage(op.join(tempdir, kind), transform=True)

        # check coordinate transformation
        pos = np.array([-95.0, -31.0, -3.0])
        nasion = np.array([-91, 0, -42])
        lpa = np.array([0, -91, -42])
        rpa = np.array([0, 91, -42])
        fids = np.vstack((nasion, lpa, rpa))
        trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
        pos = apply_trans(trans, pos)
        assert_array_equal(montage.pos[0], pos)
        idx = montage.ch_names.index(fiducials[0])
        assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
        idx = montage.ch_names.index(fiducials[1])
        assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
        idx = montage.ch_names.index(fiducials[2])
        assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
        pos = np.array([-95.0, -31.0, -3.0])
        montage_fname = op.join(tempdir, kind)
        montage = read_montage(montage_fname, unit='mm')
        assert_array_equal(montage.pos[0], pos * 1e-3)

        # test with last
        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))
        _set_montage(info, montage)
        pos2 = np.array([c['loc'][:3] for c in info['chs']])
        assert_array_equal(pos2, montage.pos)
        assert_equal(montage.ch_names, info['ch_names'])

        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))

        evoked = EvokedArray(data=np.zeros((len(montage.ch_names), 1)),
                             info=info,
                             tmin=0)

        # test return type as well as set montage
        assert_true(isinstance(evoked.set_montage(montage), type(evoked)))

        pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
        assert_array_equal(pos3, montage.pos)
        assert_equal(montage.ch_names, evoked.info['ch_names'])

        # A warning should be raised when some EEG channels are not in the montage
        with warnings.catch_warnings(record=True) as w:
            info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                               ['eeg'] * (len(montage.ch_names) + 2))
            _set_montage(info, montage)
            assert_true(len(w) == 1)

    # Channel names can be matched case-insensitively
    with warnings.catch_warnings(record=True) as w:
        info = create_info(['FP1', 'af7', 'AF3'], 1e3, ['eeg'] * 3)
        _set_montage(info, montage)
        assert_true(len(w) == 0)

    # Unless there is a collision in names
    with warnings.catch_warnings(record=True) as w:
        info = create_info(['FP1', 'Fp1', 'AF3'], 1e3, ['eeg'] * 3)
        _set_montage(info, montage)
        assert_true(len(w) == 1)
    with warnings.catch_warnings(record=True) as w:
        montage.ch_names = ['FP1', 'Fp1', 'AF3']
        info = create_info(['fp1', 'AF3'], 1e3, ['eeg', 'eeg'])
        _set_montage(info, montage)
        assert_true(len(w) == 1)
Example #49
0
def test_permuted_ols_check_h0_noeffect_labelswap(random_state=0):
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 100
    # create dummy design with no effect
    target_var = rng.randn(n_samples, 1)
    tested_var = np.arange(n_samples, dtype='f8').reshape((-1, 1))
    tested_var_not_centered = tested_var.copy()
    tested_var -= tested_var.mean(0)  # centered
    # permuted OLS
    # We check that h0 is close to the theoretical distribution, which is
    # known for this simple design (= t(n_samples - dof)).
    perm_ranges = [10, 100, 1000]  # test various number of permutations
    # we use two models (with and without intercept modelling)
    all_kstest_pvals = []
    all_kstest_pvals_intercept = []
    all_kstest_pvals_intercept2 = []
    # we compute the Mean Squared Error between cumulative Density Function
    # as a proof of consistency of the permutation algorithm
    all_mse = []
    all_mse_intercept = []
    all_mse_intercept2 = []
    for i, n_perm in enumerate(np.repeat(perm_ranges, 10)):
        ### Case no. 1: no intercept in the model
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=False,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_equal(h0.size, n_perm)
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 1).cdf)[1]
        all_kstest_pvals.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 1).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse.append(mse)
        ### Case no. 2: intercept in the model
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=True,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_array_less(pval, 1.)  # pval should not be significant
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1]
        all_kstest_pvals_intercept.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 2).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse_intercept.append(mse)
        ### Case no. 3: intercept in the model, no centering of tested vars
        pval, orig_scores, h0 = permuted_ols(
            tested_var_not_centered, target_var, model_intercept=True,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_array_less(pval, 1.)  # pval should not be significant
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1]
        all_kstest_pvals_intercept2.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 2).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse_intercept2.append(mse)
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(perm_ranges), -1))
    all_kstest_pvals_intercept = np.array(all_kstest_pvals_intercept).reshape(
        (len(perm_ranges), -1))
    all_mse = np.array(all_mse).reshape((len(perm_ranges), -1))
    all_mse_intercept = np.array(all_mse_intercept).reshape(
        (len(perm_ranges), -1))
    all_mse_intercept2 = np.array(all_mse_intercept2).reshape(
        (len(perm_ranges), -1))
    # check that the KS test does not reject agreement between the empirical
    # null and the theoretical distribution
    assert_array_less(0.01, all_kstest_pvals)
    assert_array_less(0.01, all_kstest_pvals_intercept)
    assert_array_less(0.01, all_kstest_pvals_intercept2)
    # consistency of the algorithm: the more permutations, the less the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)
    assert_array_less(np.diff(all_mse_intercept.mean(1)), 0)
    assert_array_less(np.diff(all_mse_intercept2.mean(1)), 0)
Example #50
0
def test_make_lcmv_bem(tmp_path, reg, proj, kind):
    """Test LCMV with evoked data and single trials."""
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj)

    if kind == 'surface':
        fwd = forward
    else:
        fwd = forward_vol
        assert kind == 'volume'

    filters = make_lcmv(evoked.info,
                        fwd,
                        data_cov,
                        reg=reg,
                        noise_cov=noise_cov)
    stc = apply_lcmv(evoked, filters)
    stc.crop(0.02, None)

    stc_pow = np.sum(np.abs(stc.data), axis=1)
    idx = np.argmax(stc_pow)
    max_stc = stc.data[idx]
    tmax = stc.times[np.argmax(max_stc)]

    assert 0.08 < tmax < 0.15, tmax
    assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc)

    if kind == 'surface':
        # Test picking normal orientation (surface source space only).
        filters = make_lcmv(evoked.info,
                            forward_surf_ori,
                            data_cov,
                            reg=reg,
                            noise_cov=noise_cov,
                            pick_ori='normal',
                            weight_norm=None)
        stc_normal = apply_lcmv(evoked, filters)
        stc_normal.crop(0.02, None)

        stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
        idx = np.argmax(stc_pow)
        max_stc = stc_normal.data[idx]
        tmax = stc_normal.times[np.argmax(max_stc)]

        lower = 0.04 if proj else 0.025
        assert lower < tmax < 0.14, tmax
        lower = 3e-7 if proj else 2e-7
        assert lower < np.max(max_stc) < 3e-6, np.max(max_stc)

        # No weight normalization was applied, so the amplitude of normal
        # orientation results should always be smaller than free
        # orientation results.
        assert (np.abs(stc_normal.data) <= stc.data).all()

    # Test picking source orientation maximizing output source power
    filters = make_lcmv(evoked.info,
                        fwd,
                        data_cov,
                        reg=reg,
                        noise_cov=noise_cov,
                        pick_ori='max-power')
    stc_max_power = apply_lcmv(evoked, filters)
    stc_max_power.crop(0.02, None)
    stc_pow = np.sum(np.abs(stc_max_power.data), axis=1)
    idx = np.argmax(stc_pow)
    max_stc = np.abs(stc_max_power.data[idx])
    tmax = stc.times[np.argmax(max_stc)]

    lower = 0.08 if proj else 0.04
    assert lower < tmax < 0.15, tmax
    assert 0.8 < np.max(max_stc) < 3., np.max(max_stc)

    stc_max_power.data[:, :] = np.abs(stc_max_power.data)

    if kind == 'surface':
        # Maximum output source power orientation results should be
        # similar to free orientation results in areas with channel
        # coverage
        label = mne.read_label(fname_label)
        mean_stc = stc.extract_label_time_course(label,
                                                 fwd['src'],
                                                 mode='mean')
        mean_stc_max_pow = \
            stc_max_power.extract_label_time_course(
                label, fwd['src'], mode='mean')
        assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0)

    # Test if spatial filter contains src_type
    assert filters['src_type'] == kind

    # __repr__
    assert len(evoked.ch_names) == 22
    assert len(evoked.info['projs']) == (3 if proj else 0)
    assert len(evoked.info['bads']) == 2
    rank = 17 if proj else 20
    assert 'LCMV' in repr(filters)
    assert 'unknown subject' not in repr(filters)
    assert f'{fwd["nsource"]} vert' in repr(filters)
    assert '20 ch' in repr(filters)
    assert 'rank %s' % rank in repr(filters)

    # I/O
    fname = op.join(str(tmp_path), 'filters.h5')
    with pytest.warns(RuntimeWarning, match='-lcmv.h5'):
        filters.save(fname)
    filters_read = read_beamformer(fname)
    assert isinstance(filters, Beamformer)
    assert isinstance(filters_read, Beamformer)
    # deal with object_diff strictness
    filters_read['rank'] = int(filters_read['rank'])
    filters['rank'] = int(filters['rank'])
    assert object_diff(filters, filters_read) == ''

    if kind != 'surface':
        return

    # Test if fixed forward operator is detected when picking normal or
    # max-power orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_fixed,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_fixed,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='max-power')

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')

    # Test if volume forward operator is detected when picking normal
    # orientation
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov,
                  reg=0.01,
                  noise_cov=noise_cov,
                  pick_ori='normal')

    # Test if a missing noise covariance matrix is detected when more than
    # one channel type is present in the data
    pytest.raises(ValueError,
                  make_lcmv,
                  evoked.info,
                  forward_vol,
                  data_cov=data_cov,
                  reg=0.01,
                  noise_cov=None,
                  pick_ori='max-power')

    # Test if wrong channel selection is detected in application of filter
    evoked_ch = deepcopy(evoked)
    evoked_ch.pick_channels(evoked_ch.ch_names[1:])
    filters = make_lcmv(evoked.info,
                        forward_vol,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    with pytest.deprecated_call(match='max_ori_out'):
        with pytest.raises(ValueError, match='was computed with'):
            apply_lcmv(evoked_ch, filters, max_ori_out='deprecated')

    # Test if discrepancies in channel selection of data and fwd model are
    # handled correctly in apply_lcmv
    # make filter with data where first channel was removed
    filters = make_lcmv(evoked_ch.info,
                        forward_vol,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    # applying that filter to the full data set should automatically exclude
    # this channel from the data
    # also check that no warnings are raised here (this guards against the
    # 'src should not be None' warning)
    stc = apply_lcmv(evoked, filters)

    # the result should be equal to applying this filter to a dataset without
    # this channel:
    stc_ch = apply_lcmv(evoked_ch, filters)
    assert_array_almost_equal(stc.data, stc_ch.data)

    # Test if non-matching SSP projection is detected in application of filter
    if proj:
        raw_proj = raw.copy().del_proj()
        with pytest.raises(ValueError, match='do not match the projections'):
            apply_lcmv_raw(raw_proj, filters)

    # Test apply_lcmv_raw
    use_raw = raw.copy().crop(0, 1)
    stc = apply_lcmv_raw(use_raw, filters)
    assert_allclose(stc.times, use_raw.times)
    assert_array_equal(stc.vertices[0], forward_vol['src'][0]['vertno'])

    # Test if spatial filter contains src_type
    assert 'src_type' in filters

    # check whether a filters object without src_type raises the expected warning
    del filters['src_type']  # emulate 0.16 behaviour to cause warning
    with pytest.warns(RuntimeWarning,
                      match='spatial filter does not contain '
                      'src_type'):
        apply_lcmv(evoked, filters)

    # Now test single trial using fixed orientation forward solution
    # so we can compare it to the evoked solution
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    stcs = apply_lcmv_epochs(epochs, filters)
    stcs_ = apply_lcmv_epochs(epochs, filters, return_generator=True)
    assert_array_equal(stcs[0].data, next(stcs_).data)

    epochs.drop_bad()
    assert (len(epochs.events) == len(stcs))

    # average the single trial estimates
    stc_avg = np.zeros_like(stcs[0].data)
    for this_stc in stcs:
        stc_avg += this_stc.data
    stc_avg /= len(stcs)

    # compare it to the solution using evoked with fixed orientation
    filters = make_lcmv(evoked.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    stc_fixed = apply_lcmv(evoked, filters)
    assert_array_almost_equal(stc_avg, stc_fixed.data)

    # use a label so we have few source vertices and delayed computation is
    # not used
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        data_cov,
                        reg=0.01,
                        noise_cov=noise_cov,
                        label=label)
    stcs_label = apply_lcmv_epochs(epochs, filters)

    assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)

    # Test condition where the filter weights are zero. There should not be
    # any divide-by-zero errors
    zero_cov = data_cov.copy()
    zero_cov['data'][:] = 0
    filters = make_lcmv(epochs.info,
                        forward_fixed,
                        zero_cov,
                        reg=0.01,
                        noise_cov=noise_cov)
    assert_array_equal(filters['weights'], 0)

    # Test condition where one channel type is picked
    # (avoid "grad data rank (13) did not match the noise rank (None)")
    data_cov_grad = pick_channels_cov(data_cov, [
        ch_name
        for ch_name in epochs.info['ch_names'] if ch_name.endswith(('2', '3'))
    ])
    assert len(data_cov_grad['names']) > 4
    make_lcmv(epochs.info,
              forward_fixed,
              data_cov_grad,
              reg=0.01,
              noise_cov=noise_cov)
Example #51
    def test_myknnregressor_predict_with_fit_grid(self):
        """
        Testing the 2-d grid KNN
    
        TEST 1a/1b: testing the correctness of the grid KNN, using the same approach as in "test_myknnregressor_predict_with_fit_serial", with a few important differences.
                    1) The number of input features is constrained to 2
                    2) In TEST 1a the train and test sets are obtained by randomly extracting the points of a 2D grid, using the function "generate_grid_arrays_single_pole"
                    3) In TEST 1b only two instances belong to the test set X_test: [0,0] and [Nx-1,Ny-1], Nx and Ny being the 2D grid extensions.
                    These two points belong to two different regions


                      ---------|----(Nx-1,Ny-1)
                      |        |        |
                      |    +   |    -   |
                      |    +   |    -   |
                      |    +   |    -   |
                      |    +   |    -   |
                    (0,0)------|--------|

                    where the output values of the instances are equal in magnitude but opposite in sign.
                    These data arrays are produced by "generate_grid_arrays_double_pole".

                    The result of the knn regression for these two points should be

                    a) KNN[(0,0)]_k = +val, if k <= min(Nx,Ny)^2/4
                       KNN[(Nx-1,Ny-1)]_k = -val, if k <= min(Nx,Ny)^2/4

                        where min(Nx,Ny) = Nx if Nx < Ny, Ny otherwise

                    b) |KNN[(0,0)]_k| < val, if M <= k <= Nx*Ny-2, for a large enough M
                    Here a less rigorous bound has to be adopted, since the prediction outcome
                    depends on the linear dimensions of the grid and on how the successive
                    shells of neighbors are formed.
                       
        """

        print("\n testing predict method using grid knn regressor")

        sizes = [[5, 5], [3, 18], [10, 10]]
        n_output_feat = [1, 3, 6]
        y_val = -.6

        size_train = []
        for i in sizes:
            size_train.append(i[0] * i[1] * 3 // 4)

        for a, j in zip(size_train, sizes):

            for i in range(1, 5):
                extensions = np.array(j)
                my_knn1 = MyKnnRegressor(method="grid",
                                         criterion="flat",
                                         n_neighbors=i,
                                         grid_size=extensions,
                                         parallelize=False)
                my_knn2 = MyKnnRegressor(method="grid",
                                         criterion="weighted",
                                         n_neighbors=i,
                                         grid_size=extensions,
                                         parallelize=False)

                for k in n_output_feat:
                    A, C, B = gmd.generate_grid_arrays_single_pole(
                        j[0], j[1], k, a, y_val)
                    my_knn1.fit(A, B)
                    my_knn1.predict(C)

                    my_knn2.fit(A, B)
                    my_knn2.predict(C)
                    test_vec = C[:(j[0] * j[1] - a)]
                    npt.assert_array_almost_equal(
                        test_vec,
                        my_knn1.prediction,
                        decimal=10,
                        err_msg=
                        "a) Checking knn grid regression with flat criterion.")
                    npt.assert_array_almost_equal(
                        test_vec,
                        my_knn2.prediction,
                        decimal=10,
                        err_msg=
                        "b) Checking knn grid regression with weighted criterion."
                    )

        sizes = [[3, 2], [5, 4], [10, 3]]
        n_output_feat = [1, 3, 6]
        y_val = -.6
        for i in sizes:
            extensions = np.array([i[0] * 2, i[1] * 2], dtype=int)

            for j in [1, int(min(extensions) // 2 * min(extensions) // 2)]:
                my_knn1 = MyKnnRegressor(method="grid",
                                         criterion="flat",
                                         n_neighbors=j,
                                         grid_size=extensions,
                                         parallelize=False)
                my_knn2 = MyKnnRegressor(method="grid",
                                         criterion="weighted",
                                         n_neighbors=j,
                                         grid_size=extensions,
                                         parallelize=False)

                for k in n_output_feat:
                    A, C, B = gmd.generate_grid_arrays_double_pole(
                        i[0], i[1], k, y_val)

                    my_knn1.fit(A, B)
                    my_knn1.predict(C)
                    my_knn2.fit(A, B)
                    my_knn2.predict(C)
                    npt.assert_array_almost_equal(
                        abs(my_knn1.prediction),
                        abs(y_val),
                        decimal=10,
                        err_msg=
                        "c) Checking knn regression with flat criterion, 1<k<=min(Nx,Ny)//2."
                    )
                    npt.assert_array_almost_equal(
                        abs(my_knn2.prediction),
                        abs(y_val),
                        decimal=10,
                        err_msg=
                        "d) Checking knn regression with weighted criterion, 1<k<=min(Nx,Ny)//2."
                    )

            for j in [
                    int(extensions[0] * extensions[1] - 5),
                    int(extensions[0] * extensions[1] - 1)
            ]:

                my_knn1 = MyKnnRegressor(method="grid",
                                         criterion="flat",
                                         n_neighbors=j,
                                         grid_size=extensions,
                                         parallelize=False)
                my_knn2 = MyKnnRegressor(method="grid",
                                         criterion="weighted",
                                         n_neighbors=j,
                                         grid_size=extensions,
                                         parallelize=False)

                for k in n_output_feat:
                    A, C, B = gmd.generate_grid_arrays_double_pole(
                        i[0], i[1], k, y_val)

                    my_knn1.fit(A, B)
                    my_knn1.predict(C)
                    my_knn2.fit(A, B)
                    my_knn2.predict(C)

                    npt.assert_array_less(
                        abs(my_knn1.prediction),
                        abs(y_val),
                        err_msg=
                        "e) Checking knn regression with flat criterion, min(Nx,Ny)//2 < k <= Nx*Ny-2."
                    )
                    npt.assert_array_less(
                        abs(my_knn2.prediction),
                        abs(y_val),
                        err_msg=
                        "f) Checking knn regression with weighted criterion, M < k <= Nx*Ny-2."
                    )
Example #52
def test_hierarchical_six_class_delta_criter():
    """
    Clustering on less easily separable hierarchical data with 2 levels
    of six gaussians
    """

    np.random.seed(1)

    n = 100
    d = 3

    X11 = np.random.normal(-4, 0.8, size=(n, d))
    X21 = np.random.normal(-3, 0.8, size=(n, d))
    X31 = np.random.normal(-2, 0.8, size=(n, d))
    X12 = np.random.normal(2, 0.8, size=(n, d))
    X22 = np.random.normal(3, 0.8, size=(n, d))
    X32 = np.random.normal(4, 0.8, size=(n, d))
    X = np.vstack((X11, X21, X31, X12, X22, X32))

    y_lvl1 = np.repeat([0, 1], 3 * n)
    y_lvl2 = np.repeat([0, 1, 2, 3, 4, 5], n)

    # Perform clustering without setting delta_criter
    dc = DivisiveCluster(max_components=2)
    pred = dc.fit_predict(X)

    # re-number "pred" so that each column represents
    # a flat clustering at current level
    for lvl in range(1, pred.shape[1]):
        _, inds = np.unique(pred[:, :lvl + 1], axis=0, return_inverse=True)
        pred[:, lvl] = inds

    # Perform clustering while setting delta_criter
    dc = DivisiveCluster(max_components=2, delta_criter=10)
    pred_delta_criter = dc.fit_predict(X)

    # re-number "pred_delta_criter" so that each column represents
    # a flat clustering at current level
    for lvl in range(1, pred_delta_criter.shape[1]):
        _, inds = np.unique(pred_delta_criter[:, :lvl + 1],
                            axis=0,
                            return_inverse=True)
        pred_delta_criter[:, lvl] = inds

    # Assert that pred has exactly one more level than pred_delta_criter
    assert_equal(pred.shape[1] - 1, pred_delta_criter.shape[1])

    # Assert that both pred_delta_criter and pred represent
    # perfect clustering at the first level
    ari_lvl1 = adjusted_rand_score(y_lvl1, pred[:, 0])
    assert_allclose(ari_lvl1, 1)
    ari_delta_criter_lvl1 = adjusted_rand_score(y_lvl1, pred_delta_criter[:,
                                                                          0])
    assert_allclose(ari_delta_criter_lvl1, 1)

    # Assert that pred_delta_criter leads to a clustering as good as
    # pred at the second level
    ari_lvl2 = adjusted_rand_score(y_lvl2, pred[:, 1])
    ari_delta_criter_lvl2 = adjusted_rand_score(y_lvl2, pred_delta_criter[:,
                                                                          1])
    assert_allclose(ari_delta_criter_lvl2, ari_lvl2)

    # Assert that pred suggests oversplitting at the last level (level 3)
    # which leads to a worse clustering than the last level
    # of pred_delta_criter (level 2)
    ari_lvl3 = adjusted_rand_score(y_lvl2, pred[:, -1])
    assert_array_less(ari_lvl3, ari_delta_criter_lvl2)
Example #53
def test_batch_reparametrization_sampler_samples_are_distinct_for_new_instances() -> None:
    model = _dim_two_gp()
    sampler1 = BatchReparametrizationSampler(100, model)
    sampler2 = BatchReparametrizationSampler(100, model)
    xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
    npt.assert_array_less(1e-9, tf.abs(sampler2.sample(xs) - sampler1.sample(xs)))
Example #54
    def test_myknnregressor_predict_with_fit_serial(self):
        """
    
        Testing the serial KNN
    
        TEST 1: the knn regressor is probed against several scenarios regarding the number of input and output features.
                A certain number of training instances is generated with the function "generate_spatially_gaussian_arrays_single_pole", through a Gaussian process centered around the origin.
                The number of degrees of freedom (the dimensionality of the input space) may vary from 1 to N, where N can be set by the user. The same holds for the number of output features.
                Several scenarios for the number of input/output features can be specified, as shown below, using appropriate lists of integers.
                Whatever the scenario, "generate_spatially_gaussian_arrays_single_pole" sets the value of the output features equal to a constant y_val, chosen by the user.
                Since all the training instances are equal in output space, the knn regression must return a constant series of output values, all equal to arrays containing y_val, regardless of the chosen criterion for the imputation.
                TEST 1 ascertains that this is actually what happens.

        TEST 2: same philosophy as TEST 1, with one main difference: training instances are clustered around two different poles ([-center, -center, ...] and [center, center, ...] in input space) using a Gaussian process with quite a narrow variance.
                Each cluster has the same number of elements: size_train/2
                Each element belonging to one of the two clusters is assigned a specific output value ([-y_val, -y_val, ...] for pole 2, [y_val, y_val, ...] for pole 1).
                Given the narrow spread of the two clusters, a properly functioning knn should behave in this way:

                    a) k <= size_train/2: the k neighbors all lie within a single cluster, hence the result of the regression is the same as in TEST 1
                    b) k > size_train/2: the k neighbors include members of the other cluster, hence the result of the regression is a number alpha such that |alpha| < |y_val|

                All this is checked in the second part of the test
        """

        print("\n testing predict method using serial knn regressor")

        # TEST 1
        size_train = 100
        size_test = 5
        n_input_feat = [1, 3, 4]
        n_output_feat = [1, 3, 6]
        y_val = -.6

        for i in range(1, size_train + 1):

            my_knn1 = MyKnnRegressor(method="classic",
                                     criterion="flat",
                                     n_neighbors=i,
                                     parallelize=False)
            my_knn2 = MyKnnRegressor(method="classic",
                                     criterion="weighted",
                                     n_neighbors=i,
                                     parallelize=False)

            for i in n_input_feat:
                for j in n_output_feat:
                    A, C, B = gmd.generate_spatially_gaussian_arrays_single_pole(
                        i, j, size_train, size_test, y_val)
                    my_knn1.fit(A, B)
                    my_knn1.predict(C)

                    my_knn2.fit(A, B)
                    my_knn2.predict(C)
                    test_vec = C[:size_test]
                    npt.assert_array_almost_equal(
                        test_vec,
                        my_knn1.prediction,
                        decimal=10,
                        err_msg=
                        "a) Checking knn regression with flat criterion.")
                    npt.assert_array_almost_equal(
                        test_vec,
                        my_knn2.prediction,
                        decimal=10,
                        err_msg=
                        "b) Checking knn regression with weighted criterion.")

        # TEST 2

        size_train = 100
        size_test = 50
        y_val = -.6

        for i in range(1, size_train // 2 + 1):

            my_knn1 = MyKnnRegressor(method="classic",
                                     criterion="flat",
                                     n_neighbors=i,
                                     parallelize=False)
            my_knn2 = MyKnnRegressor(method="classic",
                                     criterion="weighted",
                                     n_neighbors=i,
                                     parallelize=False)

            for i in n_input_feat:
                for j in n_output_feat:
                    A, C, B = gmd.generate_spatially_gaussian_arrays_double_pole(
                        i, j, size_train, size_test, y_val)
                    my_knn1.fit(A, B)
                    my_knn1.predict(C)

                    my_knn2.fit(A, B)
                    my_knn2.predict(C)
                    test_vec = C[:size_test // 2]
                    test_vec = np.concatenate((test_vec, -test_vec), axis=0)

                    npt.assert_array_almost_equal(
                        test_vec,
                        my_knn1.prediction,
                        decimal=10,
                        err_msg=
                        "c) Checking knn regression with flat criterion, 1<k<n_train_samples//2."
                    )
                    npt.assert_array_almost_equal(
                        test_vec,
                        my_knn2.prediction,
                        decimal=10,
                        err_msg=
                        "d) Checking knn regression with weighted criterion, 1<k<n_train_samples//2."
                    )

        for i in range(size_train // 2 + 2, size_train + 1):

            my_knn1 = MyKnnRegressor(method="classic",
                                     criterion="flat",
                                     n_neighbors=i,
                                     parallelize=False)
            my_knn2 = MyKnnRegressor(method="classic",
                                     criterion="weighted",
                                     n_neighbors=i,
                                     parallelize=False)

            for i in n_input_feat:
                for j in n_output_feat:
                    A, C, B = gmd.generate_spatially_gaussian_arrays_double_pole(
                        i, j, size_train, size_test, y_val)
                    my_knn1.fit(A, B)
                    my_knn1.predict(C)

                    my_knn2.fit(A, B)
                    my_knn2.predict(C)

                    npt.assert_array_less(
                        abs(my_knn1.prediction),
                        abs(y_val),
                        err_msg=
                        "e) Checking knn regression with flat criterion, n_train_samples//2 < k <= n_train_samples."
                    )
                    npt.assert_array_less(
                        abs(my_knn2.prediction),
                        abs(y_val),
                        err_msg=
                        "f) Checking knn regression with weighted criterion, n_train_samples//2 <= k < n_train_samples."
                    )
Example #55
def test_montage():
    """Test making montages."""
    tempdir = _TempDir()
    inputs = dict(
        sfp='FidNz 0       9.071585155     -2.359754454\n'
            'FidT9 -6.711765       0.040402876     -3.251600355\n'
            'very_very_very_long_name -5.831241498 -4.494821698  4.955347697\n'
            'Cz 0       0       8.899186843',
        csd='// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa: E501
            '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa: E501
            'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa: E501
            'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000\n'  # noqa: E501
            'E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000\n'  # noqa: E501
            'E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022',  # noqa: E501
        mm_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'  # noqa:E501
               'NumberPositions=    68\n'
               'Positions\n'
               '-86.0761 -19.9897 -47.9860\n'
               '85.7939 -20.0093 -48.0310\n'
               '0.0083 86.8110 -39.9830\n'
               '-86.0761 -24.9897 -67.9860\n'
               'Labels\nLPA\nRPA\nNz\nDummy\n',
        m_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    m\n'
              'NumberPositions=    68\nPositions\n-.0860761 -.0199897 -.0479860\n'  # noqa:E501
              '.0857939 -.0200093 -.0480310\n.0000083 .00868110 -.0399830\n'
              '.08 -.02 -.04\n'
              'Labels\nLPA\nRPA\nNz\nDummy\n',
        txt='Site  Theta  Phi\n'
            'Fp1  -92    -72\n'
            'Fp2   92     72\n'
            'very_very_very_long_name       -92     72\n'
            'O2        92    -90\n',
        elp='346\n'
            'EEG\t      F3\t -62.027\t -50.053\t      85\n'
            'EEG\t      Fz\t  45.608\t      90\t      85\n'
            'EEG\t      F4\t   62.01\t  50.103\t      85\n'
            'EEG\t      FCz\t   68.01\t  58.103\t      85\n',
        hpts='eeg Fp1 -95.0 -3. -3.\n'
             'eeg AF7 -1 -1 -3\n'
             'eeg A3 -2 -2 2\n'
             'eeg A 0 0 0',
        bvef='<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
             '<!-- Generated by EasyCap Configurator 19.05.2014 -->\n'
             '<Electrodes defaults="false">\n'
             '  <Electrode>\n'
             '    <Name>Fp1</Name>\n'
             '    <Theta>-90</Theta>\n'
             '    <Phi>-72</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>1</Number>\n'
             '  </Electrode>\n'
             '  <Electrode>\n'
             '    <Name>Fz</Name>\n'
             '    <Theta>45</Theta>\n'
             '    <Phi>90</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>2</Number>\n'
             '  </Electrode>\n'
             '  <Electrode>\n'
             '    <Name>F3</Name>\n'
             '    <Theta>-60</Theta>\n'
             '    <Phi>-51</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>3</Number>\n'
             '  </Electrode>\n'
             '  <Electrode>\n'
             '    <Name>F7</Name>\n'
             '    <Theta>-90</Theta>\n'
             '    <Phi>-36</Phi>\n'
             '    <Radius>1</Radius>\n'
             '    <Number>4</Number>\n'
             '  </Electrode>\n'
             '</Electrodes>',
    )
    # Get actual positions and save them for checking
    # csd comes from the string above, all others come from commit 2fa35d4
    poss = dict(
        sfp=[[0.0, 9.07159, -2.35975], [-6.71176, 0.0404, -3.2516],
             [-5.83124, -4.49482, 4.95535], [0.0, 0.0, 8.89919]],
        mm_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
                [1e-05, 0.08681, -0.03998], [-0.08608, -0.02499, -0.06799]],
        m_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
               [1e-05, 0.00868, -0.03998], [0.08, -0.02, -0.04]],
        txt=[[-26.25044, 80.79056, -2.96646], [26.25044, 80.79056, -2.96646],
             [-26.25044, -80.79056, -2.96646], [0.0, -84.94822, -2.96646]],
        elp=[[-48.20043, 57.55106, 39.86971], [0.0, 60.73848, 59.4629],
             [48.1426, 57.58403, 39.89198], [41.64599, 66.91489, 31.8278]],
        hpts=[[-95, -3, -3], [-1, -1., -3.], [-2, -2, 2.], [0, 0, 0]],
        bvef=[[-2.62664445e-02,  8.08398039e-02,  5.20474890e-18],
              [3.68031324e-18,  6.01040764e-02,  6.01040764e-02],
              [-4.63256329e-02,  5.72073923e-02,  4.25000000e-02],
              [-6.87664445e-02,  4.99617464e-02,  5.20474890e-18]],
    )
    for key, text in inputs.items():
        kind = key.split('_')[-1]
        fname = op.join(tempdir, 'test.' + kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        unit = 'mm' if kind == 'bvef' else 'm'
        montage = read_montage(fname, unit=unit)
        if kind in ('sfp', 'txt'):
            assert ('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 4)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (4, 3))
        assert_equal(montage.kind, 'test')
        if kind == 'csd':
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            poss['csd'] = np.c_[table['x'], table['y'], table['z']]
        if kind == 'elc':
            # Make sure points are a reasonable distance from the geometric centroid
            centroid = np.sum(montage.pos, axis=0) / montage.pos.shape[0]
            distance_from_centroid = np.apply_along_axis(
                np.linalg.norm, 1,
                montage.pos - centroid)
            assert_array_less(distance_from_centroid, 0.2)
            assert_array_less(0.01, distance_from_centroid)
        assert_array_almost_equal(poss[key], montage.pos, 4, err_msg=key)

    # For .bvef files, "unit" must be either "auto" or "mm"
    with pytest.raises(ValueError, match='be "auto" or "mm" for .bvef files.'):
        bvef_file = op.join(tempdir, 'test.' + 'bvef')
        read_montage(bvef_file, unit='m')

    # Test reading in different letter case.
    ch_names = ["F3", "FZ", "F4", "FC3", "FCz", "FC4", "C3", "CZ", "C4", "CP3",
                "CPZ", "CP4", "P3", "PZ", "P4", "O1", "OZ", "O2"]
    montage = read_montage('standard_1020', ch_names=ch_names)
    assert_array_equal(ch_names, montage.ch_names)

    # test transform
    input_strs = ["""
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """, """
    Fp1 -95.0 -31.0 -3.0
    AF7 -81 -59 -3
    AF3 -87 -41 28
    FidNz -91 0 -42
    FidT9 0 -91 -42
    FidT10 0 91 -42
    """]
    # sfp files seem to have Nz, T9, and T10 as fiducials:
    # https://github.com/mne-tools/mne-python/pull/4482#issuecomment-321980611

    kinds = ['test_fid.hpts', 'test_fid.sfp']

    for kind, input_str in zip(kinds, input_strs):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(input_str)
        montage = read_montage(op.join(tempdir, kind), transform=True)

        # check coordinate transformation
        pos = np.array([-95.0, -31.0, -3.0])
        nasion = np.array([-91, 0, -42])
        lpa = np.array([0, -91, -42])
        rpa = np.array([0, 91, -42])
        fids = np.vstack((nasion, lpa, rpa))
        trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
        pos = apply_trans(trans, pos)
        assert_array_equal(montage.pos[0], pos)
        assert_array_equal(montage.nasion[[0, 2]], [0, 0])
        assert_array_equal(montage.lpa[[1, 2]], [0, 0])
        assert_array_equal(montage.rpa[[1, 2]], [0, 0])
        pos = np.array([-95.0, -31.0, -3.0])
        montage_fname = op.join(tempdir, kind)
        montage = read_montage(montage_fname, unit='mm')
        assert_array_equal(montage.pos[0], pos * 1e-3)

        # test setting the montage on an Info object (using the last montage read)
        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))
        _set_montage(info, montage)
        pos2 = np.array([c['loc'][:3] for c in info['chs']])
        assert_array_equal(pos2, montage.pos)
        assert_equal(montage.ch_names, info['ch_names'])

        info = create_info(
            montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))

        evoked = EvokedArray(
            data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)

        # test return type as well as set montage
        assert (isinstance(evoked.set_montage(montage), type(evoked)))

        pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
        assert_array_equal(pos3, montage.pos)
        assert_equal(montage.ch_names, evoked.info['ch_names'])

        # Warning should be raised when some EEG are not specified in montage
        info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                           ['eeg'] * (len(montage.ch_names) + 2))
        with pytest.warns(RuntimeWarning, match='position specified'):
            _set_montage(info, montage)

    # Channel names can be treated case-insensitively
    info = create_info(['FP1', 'af7', 'AF3'], 1e3, ['eeg'] * 3)
    _set_montage(info, montage)

    # Unless there is a collision in names
    info = create_info(['FP1', 'Fp1', 'AF3'], 1e3, ['eeg'] * 3)
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage)
    assert len(info['dig']) == 5  # 2 EEG w/pos, 3 fiducials
    montage.ch_names = ['FP1', 'Fp1', 'AF3']
    info = create_info(['fp1', 'AF3'], 1e3, ['eeg', 'eeg'])
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage, set_dig=False)
    assert (info['dig'] is None)

    # test get_pos2d method
    montage = read_montage("standard_1020")
    c3 = montage.get_pos2d()[montage.ch_names.index("C3")]
    c4 = montage.get_pos2d()[montage.ch_names.index("C4")]
    fz = montage.get_pos2d()[montage.ch_names.index("Fz")]
    oz = montage.get_pos2d()[montage.ch_names.index("Oz")]
    f1 = montage.get_pos2d()[montage.ch_names.index("F1")]
    assert (c3[0] < 0)  # left hemisphere
    assert (c4[0] > 0)  # right hemisphere
    assert (fz[1] > 0)  # frontal
    assert (oz[1] < 0)  # occipital
    assert_allclose(fz[0], 0, atol=1e-2)  # midline
    assert_allclose(oz[0], 0, atol=1e-2)  # midline
    assert (f1[0] < 0 and f1[1] > 0)  # left frontal

    # test get_builtin_montages function
    montages = get_builtin_montages()
    assert (len(montages) > 0)  # MNE should always ship with montages
    assert ("standard_1020" in montages)  # 10/20 montage
    assert ("standard_1005" in montages)  # 10/05 montage
Example #56
def step_impl(context):
    tools = Tools()
    expected_rmse = parse_vector(context.text)
    rmse = tools.calculate_rmse(context._estimations, context._ground_truth)
    assert_array_less(rmse.value, expected_rmse.value)
Example #57
def test_dgapl21l1():
    """Test duality gap for L21 + L1 regularization."""
    n_orient = 2
    M, G, active_set = _generate_tf_data()
    n_times = M.shape[1]
    n_sources = G.shape[1]
    tstep, wsize = np.array([4, 2]), np.array([64, 16])
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)

    for l1_ratio in [0.05, 0.1]:
        alpha_max = norm_epsilon_inf(G, M, phi, l1_ratio, n_orient)
        alpha_space = (1. - l1_ratio) * alpha_max
        alpha_time = l1_ratio * alpha_max

        Z = np.zeros([n_sources, phi.n_coefs.sum()])
        # for alpha = alpha_max, Z = 0 is the solution so the dgap is 0
        gap = dgap_l21l1(M, G, Z, np.ones(n_sources, dtype=bool), alpha_space,
                         alpha_time, phi, phiT, n_orient, -np.inf)[0]

        assert_allclose(0., gap)
        # check that the solution for alpha smaller than alpha_max is non-zero:
        X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
            M,
            G,
            alpha_space / 1.01,
            alpha_time / 1.01,
            maxit=200,
            tol=1e-8,
            verbose=True,
            debias=False,
            n_orient=n_orient,
            tstep=tstep,
            wsize=wsize,
            return_gap=True)
        # allow possible small numerical errors (negative gap)
        assert_array_less(-1e-10, gap)
        assert_array_less(gap, 1e-8)
        assert_array_less(1, len(active_set_hat_tf))

        X_hat_tf, active_set_hat_tf, E, gap = tf_mixed_norm_solver(
            M,
            G,
            alpha_space / 5.,
            alpha_time / 5.,
            maxit=200,
            tol=1e-8,
            verbose=True,
            debias=False,
            n_orient=n_orient,
            tstep=tstep,
            wsize=wsize,
            return_gap=True)
        assert_array_less(-1e-10, gap)
        assert_array_less(gap, 1e-8)
        assert_array_less(1, len(active_set_hat_tf))
Example #58
def test_l21_mxne():
    """Test convergence of MxNE solver."""
    n, p, t, alpha = 30, 40, 20, 1.
    rng = np.random.RandomState(0)
    G = rng.randn(n, p)
    G /= np.std(G, axis=0)[None, :]
    X = np.zeros((p, t))
    X[0] = 3
    X[4] = -2
    M = np.dot(G, X)

    args = (M, G, alpha, 1000, 1e-8)
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(*args,
                                                      active_set_size=None,
                                                      debias=True,
                                                      solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _, gap_cd = mixed_norm_solver(
            *args,
            active_set_size=None,
            debias=True,
            solver='cd',
            return_gap=True)
    assert_array_less(gap_cd, 1e-8)
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, E, gap_bcd = mixed_norm_solver(
            M,
            G,
            alpha,
            maxit=1000,
            tol=1e-8,
            active_set_size=None,
            debias=True,
            solver='bcd',
            return_gap=True)
    assert_array_less(gap_bcd, 9.6e-9)
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_allclose(X_hat_prox, X_hat_cd, rtol=1e-2)
    assert_allclose(X_hat_prox, X_hat_bcd, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)

    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(*args,
                                                      active_set_size=2,
                                                      debias=True,
                                                      solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_cd, active_set, _ = mixed_norm_solver(*args,
                                                    active_set_size=2,
                                                    debias=True,
                                                    solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(*args,
                                                     active_set_size=2,
                                                     debias=True,
                                                     solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 4])
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)

    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(*args,
                                                      active_set_size=2,
                                                      debias=True,
                                                      n_orient=2,
                                                      solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(*args,
                                                     active_set_size=2,
                                                     debias=True,
                                                     n_orient=2,
                                                     solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])

    # suppress a coordinate-descent warning here
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = mixed_norm_solver(*args,
                                                    active_set_size=2,
                                                    debias=True,
                                                    n_orient=2,
                                                    solver='cd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 4, 5])
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
    assert_allclose(X_hat_bcd, X_hat_cd, rtol=1e-2)

    with pytest.warns(None):  # CD
        X_hat_bcd, active_set, _ = mixed_norm_solver(*args,
                                                     active_set_size=2,
                                                     debias=True,
                                                     n_orient=5,
                                                     solver='bcd')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(None):  # CD
        X_hat_prox, active_set, _ = mixed_norm_solver(*args,
                                                      active_set_size=2,
                                                      debias=True,
                                                      n_orient=5,
                                                      solver='prox')
    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    with pytest.warns(RuntimeWarning, match='descent'):
        X_hat_cd, active_set, _ = mixed_norm_solver(*args,
                                                    active_set_size=2,
                                                    debias=True,
                                                    n_orient=5,
                                                    solver='cd')

    assert_array_equal(np.where(active_set)[0], [0, 1, 2, 3, 4])
    assert_array_equal(X_hat_bcd, X_hat_cd)
    assert_allclose(X_hat_bcd, X_hat_prox, rtol=1e-2)
Example #59
    def test_from_keywords_master(self):

        zmin = 0.01
        zmax = 1.98
        log_mlow = 6.
        log_mhigh = 9.
        host_m200 = 10**13.
        LOS_normalization = 2000.
        draw_poisson = False
        log_mass_sheet_min = 7.
        log_mass_sheet_max = 10.
        kappa_scale = 1.
        delta_power_law_index = -0.17
        delta_power_law_index_coupling = 0.5
        cone_opening_angle = 8.
        m_pivot = 10**8
        sigma_sub = 0.1

        keywords_master = {
            'zmin': zmin,
            'zmax': zmax,
            'log_mc': None,
            'log_mlow': log_mlow,
            'sigma_sub': sigma_sub,
            'a_wdm': None,
            'b_wdm': None,
            'c_wdm': None,
            'log_mhigh': log_mhigh,
            'host_m200': host_m200,
            'host_c': 4.,
            'host_Rs': 40.,
            'r_tidal': '0.5Rs',
            'LOS_normalization': LOS_normalization,
            'draw_poisson': draw_poisson,
            'log_mass_sheet_min': log_mass_sheet_min,
            'log_mass_sheet_max': log_mass_sheet_max,
            'kappa_scale': kappa_scale,
            'delta_power_law_index': delta_power_law_index,
            'delta_power_law_index_coupling': delta_power_law_index_coupling,
            'm_pivot': m_pivot,
            'cone_opening_angle': cone_opening_angle
        }

        f = ProjectedNFW.from_keywords_master(keywords_master, self.lens_cosmo,
                                              self.geometry)
        rendering_radius, Rs, x_core_host, x200 = f.rmax2d_kpc, f._rs_kpc, f.xtidal, f.zmax_units_rs

        npt.assert_equal(
            rendering_radius, 0.5 * keywords_master['cone_opening_angle'] *
            self.geometry.kpc_per_arcsec_zlens)
        npt.assert_equal(x_core_host * Rs, 0.5 * Rs)
        npt.assert_equal(x200 * Rs, keywords_master['host_c'] * Rs)
        npt.assert_equal(Rs, keywords_master['host_Rs'])

        x, y, r3 = f.draw(10000)
        r2 = np.hypot(x, y)
        npt.assert_array_less(r2, rendering_radius)

        npt.assert_raises(Exception, ProjectedNFW.from_keywords_master,
                          {'blah': 0.}, self.lens_cosmo, self.geometry)
        npt.assert_raises(Exception, ProjectedNFW.from_keywords_master, {
            'host_m200': 10**13.,
            'host_c': 4.,
            'host_Rs': 50.,
            'r_tidal': 'Rss'
        }, self.lens_cosmo, self.geometry)
Example #60
def test_batch_reparametrization_sampler_samples_are_continuous() -> None:
    sampler = BatchReparametrizationSampler(100, _dim_two_gp())
    xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
    npt.assert_array_less(tf.abs(sampler.sample(xs + 1e-20) - sampler.sample(xs)), 1e-20)