Example #1
def test_validate_time_stretch():

    def __test_bad_ts_tuple(ts_tuple):
        pytest.raises(ScaperError, scaper.core._validate_time_stretch,
                      ts_tuple)

    # bad consts
    bad_ts_values = [None, 1j, 'yes', [], [5], -5, 0]
    for bv in bad_ts_values:
        __test_bad_ts_tuple(('const', bv))

    # empty list for choose
    __test_bad_ts_tuple(('choose', []))

    # bad consts in list for choose
    for bv in bad_ts_values:
        __test_bad_ts_tuple(('choose', [bv]))

    # bad start time in distributions
    __test_bad_ts_tuple(('uniform', 0, 1))
    __test_bad_ts_tuple(('uniform', -5, 1))
    __test_bad_ts_tuple(('truncnorm', 5, 1, 0, 10))
    __test_bad_ts_tuple(('truncnorm', 5, 1, -5, 10))

    # Using a normal dist must raise a warning since it can give values <= 0
    pytest.warns(
        ScaperWarning, scaper.core._validate_time_stretch, ('normal', 5, 1))
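Note: Example #1 uses the legacy callable form of pytest.warns and pytest.raises; most later examples use the context-manager form. A minimal self-contained sketch of both forms, with a hypothetical emit() helper:

import warnings

import pytest


def emit():
    # hypothetical helper that emits the warning under test
    warnings.warn("this API is deprecated", DeprecationWarning)


def test_warns_context_manager():
    # preferred form; match= applies a regex to the warning message
    with pytest.warns(DeprecationWarning, match="deprecated"):
        emit()


def test_warns_callable():
    # legacy form, as in Example #1: pass the callable plus its arguments
    pytest.warns(DeprecationWarning, emit)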
Example #2
def test_unary_ufunc(ufunc):
    if ufunc == 'fix':
        pytest.skip('fix calls floor in a way that we do not yet support')
    dafunc = getattr(da, ufunc)
    npfunc = getattr(np, ufunc)

    arr = np.random.randint(1, 100, size=(20, 20))
    darr = da.from_array(arr, 3)

    with pytest.warns(None):  # some invalid values (arccos, arcsin, etc.)
        # applying Dask ufunc doesn't trigger computation
        assert isinstance(dafunc(darr), da.Array)
        assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)

    with pytest.warns(None):  # some invalid values (arccos, arcsin, etc.)
        # applying NumPy ufunc is lazy
        if isinstance(npfunc, np.ufunc):
            assert isinstance(npfunc(darr), da.Array)
        else:
            assert isinstance(npfunc(darr), np.ndarray)
        assert_eq(npfunc(darr), npfunc(arr), equal_nan=True)

    with pytest.warns(None):  # some invalid values (arccos, arcsin, etc.)
        # applying Dask ufunc to normal ndarray triggers computation
        assert isinstance(dafunc(arr), np.ndarray)
        assert_eq(dafunc(arr), npfunc(arr), equal_nan=True)
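Note: pytest.warns(None), used above to tolerate incidental invalid-value warnings, was deprecated in pytest 6.2 and removed in pytest 7. A rough stdlib equivalent, assuming the same NumPy setup:

import warnings

import numpy as np

arr = np.array([-2.0, 0.5, 2.0])

# capture any warnings without failing or asserting on them
with warnings.catch_warnings(record=True):
    warnings.simplefilter("always")
    np.arccos(arr)  # values outside [-1, 1] emit a RuntimeWarning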
Example #3
def test_head_translation():
    """Test Maxwell filter head translation."""
    raw = read_crop(raw_fname, (0., 1.))
    # First try with an unchanged destination
    with use_coil_def(elekta_def_fname):
        raw_sss = maxwell_filter(raw, destination=raw_fname,
                                 origin=mf_head_origin, regularize=None,
                                 bad_condition='ignore')
    assert_meg_snr(raw_sss, read_crop(sss_std_fname, (0., 1.)), 200.)
    # Now with default
    with use_coil_def(elekta_def_fname):
        with pytest.warns(RuntimeWarning, match='over 25 mm'):
            raw_sss = maxwell_filter(raw, destination=mf_head_origin,
                                     origin=mf_head_origin, regularize=None,
                                     bad_condition='ignore', verbose=True)
    assert_meg_snr(raw_sss, read_crop(sss_trans_default_fname), 125.)
    destination = np.eye(4)
    destination[2, 3] = 0.04
    assert_allclose(raw_sss.info['dev_head_t']['trans'], destination)
    # Now to sample's head pos
    with pytest.warns(RuntimeWarning, match='= 25.6 mm'):
        raw_sss = maxwell_filter(raw, destination=sample_fname,
                                 origin=mf_head_origin, regularize=None,
                                 bad_condition='ignore', verbose=True)
    assert_meg_snr(raw_sss, read_crop(sss_trans_sample_fname), 13., 100.)
    assert_allclose(raw_sss.info['dev_head_t']['trans'],
                    read_info(sample_fname)['dev_head_t']['trans'])
    # Degenerate cases
    pytest.raises(RuntimeError, maxwell_filter, raw,
                  destination=mf_head_origin, coord_frame='meg')
    pytest.raises(ValueError, maxwell_filter, raw, destination=[0.] * 4)
Example #4
    def test_read_on_missing(self, instance):
        with h5py.File('test', driver='core', backing_store=False) as h5f:
            instance.write(h5f)
            names = ['randomname']

            def _read(**kwargs):
                return self.TEST_CLASS.read(h5f, names=names, format='hdf5',
                                            **kwargs)

            # check on_missing='error' (default) raises ValueError
            with pytest.raises(ValueError) as exc:
                _read()
            assert str(exc.value) == ('\'randomname\' not found in any input '
                                      'file')

            # check on_missing='warn' prints warning
            with pytest.warns(UserWarning):
                _read(on_missing='warn')

            # check on_missing='ignore' does nothing
            with pytest.warns(None) as record:
                _read(on_missing='ignore')
            assert not record.list

            # check on_missing=<anything else> raises exception
            with pytest.raises(ValueError) as exc:
                _read(on_missing='blah')
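Note: Example #4 asserts that no warning is emitted via pytest.warns(None) plus assert not record.list. On pytest 7+, where pytest.warns(None) is gone, the recwarn fixture supports the same check; a minimal sketch:

def test_emits_no_warning(recwarn):
    # recwarn records every warning raised during the test
    result = sum([1, 2, 3])  # stand-in for the code under test
    assert result == 6
    assert len(recwarn) == 0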
Example #5
def test_PR_424():
    """Ensure deprecation and user warnings are triggered."""
    import warnings
    warnings.simplefilter('always')  # Alert us of deprecation warnings.

    # Recommended use
    ColorClip([1000, 600], color=(60, 60, 60), duration=10).close()

    with pytest.warns(DeprecationWarning):
        # Uses `col` so should work the same as above, but give warning.
        ColorClip([1000, 600], col=(60, 60, 60), duration=10).close()

    # Catch all warnings as record.
    with pytest.warns(None) as record:
        # Should give 2 warnings and use `color`, not `col`
        ColorClip([1000, 600], color=(60, 60, 60), duration=10, col=(2, 2, 2)).close()

    message1 = 'The `ColorClip` parameter `col` has been deprecated. ' + \
               'Please use `color` instead.'
    message2 = 'The arguments `color` and `col` have both been passed to ' + \
               '`ColorClip` so `col` has been ignored.'

    # Assert that two warnings popped and validate the message text.
    assert len(record) == 2
    assert str(record[0].message) == message1
    assert str(record[1].message) == message2
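Note: warnings.simplefilter('always'), as called above, mutates global filter state for the rest of the session. A sketch of pytest's marker-based alternative, which scopes the filter to a single test:

import warnings

import pytest


@pytest.mark.filterwarnings("always::DeprecationWarning")
def test_sees_every_deprecation():
    with pytest.warns(DeprecationWarning):
        warnings.warn("old API", DeprecationWarning)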
Example #6
def test_deprecated_07():
    with pytest.warns(pvlibDeprecationWarning):
        from pvlib.tmy import readtmy2
        readtmy2(tmy2_testfile)
    with pytest.warns(pvlibDeprecationWarning):
        from pvlib.tmy import readtmy3
        readtmy3(tmy3_testfile)
Example #7
def test_warnings():
    a = Input(shape=(3,), name='input_a')
    b = Input(shape=(3,), name='input_b')

    a_2 = Dense(4, name='dense_1')(a)
    dp = Dropout(0.5, name='dropout')
    b_2 = dp(b)

    model = Model([a, b], [a_2, b_2])

    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
                  sample_weight_mode=None)

    def gen_data(batch_sz):
        while True:
            yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
                   [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])

    with pytest.warns(Warning) as w:
        out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
    warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
    assert warning_raised, 'No warning raised when using generator with processes.'

    with pytest.warns(None) as w:
        out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
    assert all(['Sequence' not in str(w_.message) for w_ in w]), 'A warning was raised for Sequence.'
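Note: the record object returned by pytest.warns is iterable, and each entry exposes .message and .category; a self-contained sketch of the inspection idiom used above:

import warnings

import pytest


def test_inspect_warning_record():
    with pytest.warns(UserWarning) as record:
        warnings.warn("first", UserWarning)
        warnings.warn("second", UserWarning)
    assert len(record) == 2
    assert [str(w.message) for w in record] == ["first", "second"]
    assert all(w.category is UserWarning for w in record)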
Example #8
def test_plot_ica_overlay():
    """Test plotting of ICA cleaning."""
    import matplotlib.pyplot as plt
    raw = _get_raw(preload=True)
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    # can't use info.normalize_proj here because of how and when ICA and Epochs
    # objects do picking of Raw data
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # don't test raw, needs preload ...
    with pytest.warns(RuntimeWarning, match='projection'):
        ecg_epochs = create_ecg_epochs(raw, picks=picks)
    ica.plot_overlay(ecg_epochs.average())
    with pytest.warns(RuntimeWarning, match='projection'):
        eog_epochs = create_eog_epochs(raw, picks=picks)
    ica.plot_overlay(eog_epochs.average())
    pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
    ica.plot_overlay(raw)
    plt.close('all')

    # smoke test for CTF
    raw = read_raw_fif(raw_ctf_fname)
    raw.apply_gradient_compensation(3)
    picks = pick_types(raw.info, meg=True, ref_meg=False)
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=picks)
    with pytest.warns(RuntimeWarning, match='longer than'):
        ecg_epochs = create_ecg_epochs(raw)
    ica.plot_overlay(ecg_epochs.average())
    plt.close('all')
Example #9
    def test_xindex(self):
        x = numpy.linspace(0, 100, num=self.data.shape[0])
        # test simple
        series = self.create(xindex=x)
        self.assertQuantityEqual(
            series.xindex, units.Quantity(x, self.TEST_CLASS._default_xunit))
        # test deleter
        del series.xindex
        del series.xindex
        x1 = series.x0.value + series.shape[0] * series.dx.value
        x_default = numpy.linspace(series.x0.value, x1, num=series.shape[0],
                                   endpoint=False)
        self.assertQuantityEqual(
            series.xindex,
            units.Quantity(x_default, self.TEST_CLASS._default_xunit))
        # test setting of x0 and dx
        series = self.create(xindex=units.Quantity(x, 'Farad'))
        self.assertEqual(series.x0, units.Quantity(x[0], 'Farad'))
        self.assertEqual(series.dx, units.Quantity(x[1] - x[0], 'Farad'))
        self.assertEqual(series.xunit, units.Farad)
        self.assertEqual(series.xspan, (x[0], x[-1] + x[1] - x[0]))
        # test that setting xindex warns about ignoring dx or x0
        with pytest.warns(UserWarning):
            series = self.create(xindex=units.Quantity(x, 'Farad'), dx=1)
        with pytest.warns(UserWarning):
            series = self.create(xindex=units.Quantity(x, 'Farad'), x0=0)
        # test non-regular xindex
        x = numpy.logspace(0, 2, num=self.data.shape[0])
        series = self.create(xindex=units.Quantity(x, 'Mpc'))

        def _get_dx():
            series.dx

        self.assertRaises(AttributeError, _get_dx)
        self.assertEqual(series.x0, units.Quantity(1, 'Mpc'))
        self.assertEqual(series.xspan, (x[0], x[-1] + x[-1] - x[-2]))
Example #10
    def test_iterate_respects_subpolling_interval(self):
        r1 = self.app.AsyncResult(uuid())
        r2 = self.app.AsyncResult(uuid())
        backend = r1.backend = r2.backend = Mock()
        backend.subpolling_interval = 10

        ready = r1.ready = r2.ready = Mock()

        def se(*args, **kwargs):
            ready.side_effect = KeyError()
            return False
        ready.return_value = False
        ready.side_effect = se

        x = self.app.ResultSet([r1, r2])
        with self.dummy_copy():
            with patch('celery.result.time') as _time:
                with pytest.warns(CPendingDeprecationWarning):
                    with pytest.raises(KeyError):
                        list(x.iterate())
                _time.sleep.assert_called_with(10)

            backend.subpolling_interval = 0
            with patch('celery.result.time') as _time:
                with pytest.warns(CPendingDeprecationWarning):
                    with pytest.raises(KeyError):
                        ready.return_value = False
                        ready.side_effect = se
                        list(x.iterate())
                    _time.sleep.assert_not_called()
Example #11
def test_docstring_parameters():
    """Test module docstring formatting."""
    from numpydoc import docscrape
    incorrect = []
    for name in public_modules:
        with pytest.warns(None):  # traits warnings
            module = __import__(name, globals())
        for submod in name.split('.')[1:]:
            module = getattr(module, submod)
        classes = inspect.getmembers(module, inspect.isclass)
        for cname, cls in classes:
            if cname.startswith('_') and cname not in _doc_special_members:
                continue
            with pytest.warns(None) as w:
                cdoc = docscrape.ClassDoc(cls)
            for ww in w:
                if 'Using or importing the ABCs' not in str(ww.message):
                    raise RuntimeError('Error for __init__ of %s in %s:\n%s'
                                       % (cls, name, ww))
            if hasattr(cls, '__init__'):
                incorrect += check_parameters_match(cls.__init__, cdoc, cls)
            for method_name in cdoc.methods:
                method = getattr(cls, method_name)
                incorrect += check_parameters_match(method, cls=cls)
            if hasattr(cls, '__call__'):
                incorrect += check_parameters_match(cls.__call__, cls=cls)
        functions = inspect.getmembers(module, inspect.isfunction)
        for fname, func in functions:
            if fname.startswith('_'):
                continue
            incorrect += check_parameters_match(func)
    msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
    if len(incorrect) > 0:
        raise AssertionError(msg)
Example #12
async def test_app_ctor() -> None:
    loop = asyncio.get_event_loop()
    with pytest.warns(DeprecationWarning):
        app = web.Application(loop=loop)
    with pytest.warns(DeprecationWarning):
        assert loop is app.loop
    assert app.logger is log.web_logger
Example #13
def test_ica_ctf():
    """Test run ICA computation on ctf data with/without compensation."""
    method = 'fastica'
    raw = read_raw_ctf(ctf_fname, preload=True)
    events = make_fixed_length_events(raw, 99999)
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        evoked = epochs.average()

        # test fit
        for inst in [raw, epochs]:
            ica = ICA(n_components=2, random_state=0, max_iter=2,
                      method=method)
            with pytest.warns(UserWarning, match='did not converge'):
                ica.fit(inst)

        # test apply and get_sources
        for inst in [raw, epochs, evoked]:
            ica.apply(inst)
            ica.get_sources(inst)

    # test mixed compensation case
    raw.apply_gradient_compensation(0)
    ica = ICA(n_components=2, random_state=0, max_iter=2, method=method)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    raw.apply_gradient_compensation(1)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    evoked = epochs.average()
    for inst in [raw, epochs, evoked]:
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.apply(inst)
        with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
            ica.get_sources(inst)
Example #14
    def test_default(self):
        """ Default semantics in the presence or absence of a file """
        fname = self.mktemp()

        # No existing file; create a new file and open RW
        with pytest.warns(H5pyDeprecationWarning):
            with File(fname) as f:
                self.assertTrue(f)
                self.assertEqual(f.mode, 'r+')

        # Existing readonly file; open read-only
        os.chmod(fname, stat.S_IREAD)
        # Running as root (e.g. in a docker container) gives 'r+' as the file
        # mode, even for a read-only file.  See
        # https://github.com/h5py/h5py/issues/696
        exp_mode = 'r+' if os.stat(fname).st_uid == 0 and platform != "win32" else 'r'
        try:
            with pytest.warns(H5pyDeprecationWarning):
                with File(fname) as f:
                    self.assertTrue(f)
                    self.assertEqual(f.mode, exp_mode)
        finally:
            os.chmod(fname, stat.S_IWRITE)

        # File exists but is not HDF5; raise IOError
        with open(fname, 'wb') as f:
            f.write(b'\x00')
        with pytest.warns(H5pyDeprecationWarning):
            with self.assertRaises(IOError):
                File(fname)
Example #15
def check_min_samples_leaf(name):
    X, y = hastie_X, hastie_y

    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]

    # test boundary value
    with pytest.warns(DeprecationWarning, match='min_samples_leaf'):
        assert_raises(ValueError,
                      ForestEstimator(min_samples_leaf=-1).fit, X, y)
    with pytest.warns(DeprecationWarning, match='min_samples_leaf'):
        assert_raises(ValueError,
                      ForestEstimator(min_samples_leaf=0).fit, X, y)

    est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
    with pytest.warns(DeprecationWarning, match='min_samples_leaf'):
        est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = np.bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), 4,
                   "Failed with {0}".format(name))

    est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
                          random_state=0)
    with pytest.warns(DeprecationWarning, match='min_samples_leaf'):
        est.fit(X, y)
    out = est.estimators_[0].tree_.apply(X)
    node_counts = np.bincount(out)
    # drop inner nodes
    leaf_count = node_counts[node_counts != 0]
    assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
                   "Failed with {0}".format(name))
Example #16
def test_morph():
    """Test inter-subject label morphing."""
    label_orig = read_label(real_label_fname)
    label_orig.subject = 'sample'
    # should work for specifying vertices for both hemis, or just the
    # hemi of the given label
    vals = list()
    for grade in [5, [np.arange(10242), np.arange(10242)], np.arange(10242)]:
        label = label_orig.copy()
        # this should throw an error because the label has all zero values
        pytest.raises(ValueError, label.morph, 'sample', 'fsaverage')
        label.values.fill(1)
        label = label.morph(None, 'fsaverage', 5, grade, subjects_dir, 1)
        label = label.morph('fsaverage', 'sample', 5, None, subjects_dir, 2)
        assert (np.in1d(label_orig.vertices, label.vertices).all())
        assert (len(label.vertices) < 3 * len(label_orig.vertices))
        vals.append(label.vertices)
    assert_array_equal(vals[0], vals[1])
    # make sure label smoothing can run
    assert_equal(label.subject, 'sample')
    verts = [np.arange(10242), np.arange(10242)]
    for hemi in ['lh', 'rh']:
        label.hemi = hemi
        with pytest.warns(None):  # morph map maybe missing
            label.morph(None, 'fsaverage', 5, verts, subjects_dir, 2)
    pytest.raises(TypeError, label.morph, None, 1, 5, verts,
                  subjects_dir, 2)
    pytest.raises(TypeError, label.morph, None, 'fsaverage', 5.5, verts,
                  subjects_dir, 2)
    with pytest.warns(None):  # morph map maybe missing
        label.smooth(subjects_dir=subjects_dir)  # make sure this runs
Example #17
def test_deprecation():
    """Test our various warnings."""
    with pytest.warns(deprecation.MetpyDeprecationWarning):
        FakeyMcFakeface.dontuse()
        assert FakeyMcFakeface.dontuse.__doc__ == "Don't use."
    with pytest.warns(deprecation.MetpyDeprecationWarning):
        FakeyMcFakeface.really_dontuse()
Example #18
def test_compression_args():

    z = create(100, compression='zlib', compression_opts=9)
    assert_is_instance(z, Array)
    eq('zlib', z.compressor.codec_id)
    eq(9, z.compressor.level)

    # 'compressor' overrides 'compression'
    z = create(100, compressor=Zlib(9), compression='bz2', compression_opts=1)
    assert_is_instance(z, Array)
    eq('zlib', z.compressor.codec_id)
    eq(9, z.compressor.level)

    # 'compressor' ignores 'compression_opts'
    z = create(100, compressor=Zlib(9), compression_opts=1)
    assert_is_instance(z, Array)
    eq('zlib', z.compressor.codec_id)
    eq(9, z.compressor.level)

    with pytest.warns(UserWarning):
        # 'compressor' overrides 'compression'
        create(100, compressor=Zlib(9), compression='bz2', compression_opts=1)
    with pytest.warns(UserWarning):
        # 'compressor' ignores 'compression_opts'
        create(100, compressor=Zlib(9), compression_opts=1)
Example #19
def test_old_encodings(
        delimiter, tmpdir, main_table_file,
        subann_table_file, anns_key, subs_key):
    """Current and previous encodings of tables work and are deprecated appropriately."""
    # Data setup
    anns_data, subs_data = LINES_BY_DELIM[delimiter]
    anns_file = _write(main_table_file, anns_data)
    subs_file = _write(subann_table_file, subs_data)
    conf_file = tmpdir.join("conf.yaml").strpath
    conf_data = {
        METADATA_KEY: {
            anns_key: anns_file,
            subs_key: subs_file,
            OUTDIR_KEY: tmpdir.strpath
        }
    }
    # Project creation
    with open(conf_file, 'w') as cfg:
        yaml.dump(conf_data, cfg)
    prj = Project(conf_file)
    # Behavioral validation/assertions
    with pytest.warns(DeprecationWarning):
        anns1 = getattr(prj, anns_key)
    with pytest.warns(DeprecationWarning):
        anns2 = prj[anns_key]
    with pytest.warns(DeprecationWarning):
        subs1 = getattr(prj, subs_key)
    with pytest.warns(DeprecationWarning):
        subs2 = prj[subs_key]
    # Validation that we didn't just get back garbage value(s)
    assert all((anns1 == anns2).all())
    assert all((subs1 == subs2).all())
Example #20
def test_from_networkx_with_bad_attributes():
    G = nx.Graph()
    G.add_nodes_from([(0, {"index": "a", "attr_1": 10}),
                      (1, {"index": "b", "attr_1": 20})])
    G.add_edges_from([[0, 1]])

    with pytest.warns(UserWarning):
        renderer = from_networkx(G, nx.circular_layout)
        assert renderer.node_renderer.data_source.data["index"] == [0, 1]
        assert renderer.node_renderer.data_source.data["attr_1"] == [10, 20]

    G = nx.Graph()
    G.add_nodes_from([0, 1])
    G.add_edges_from([(0, 1, {"start": "A", "attr_1": 10})])

    with pytest.warns(UserWarning):
        renderer = from_networkx(G, nx.circular_layout)
        assert renderer.edge_renderer.data_source.data["start"] == [0]
        assert renderer.edge_renderer.data_source.data["end"] == [1]
        assert renderer.edge_renderer.data_source.data["attr_1"] == [10]

    G = nx.Graph()
    G.add_nodes_from([0, 1])
    G.add_edges_from([(0, 1, {"end": "A", "attr_1": 10})])

    with pytest.warns(UserWarning):
        renderer = from_networkx(G, nx.circular_layout)
        assert renderer.edge_renderer.data_source.data["start"] == [0]
        assert renderer.edge_renderer.data_source.data["end"] == [1]
        assert renderer.edge_renderer.data_source.data["attr_1"] == [10]
Example #21
def test_make_column_transformer():
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer((scaler, 'first'), (norm, ['second']))
    names, transformers, columns = zip(*ct.transformers)
    assert_equal(names, ("standardscaler", "normalizer"))
    assert_equal(transformers, (scaler, norm))
    assert_equal(columns, ('first', ['second']))

    # XXX remove in v0.22
    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        ct1 = make_column_transformer(([0], norm))
    ct2 = make_column_transformer((norm, [0]))
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    assert_almost_equal(ct1.fit_transform(X_array),
                        ct2.fit_transform(X_array))

    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        make_column_transformer(('first', 'drop'))

    with pytest.warns(DeprecationWarning,
                      match='`make_column_transformer` now expects'):
        make_column_transformer(('passthrough', 'passthrough'),
                                ('first', 'drop'))
Example #22
def test_io_inverse_operator():
    """Test IO of inverse_operator."""
    tempdir = _TempDir()
    inverse_operator = read_inverse_operator(fname_inv)
    x = repr(inverse_operator)
    assert (x)
    assert (isinstance(inverse_operator['noise_cov'], Covariance))
    # just do one example for .gz, as it should generalize
    _compare_io(inverse_operator, '.gz')

    # test warnings on bad filenames
    inv_badname = op.join(tempdir, 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-inv.fif'):
        write_inverse_operator(inv_badname, inverse_operator)
    with pytest.warns(RuntimeWarning, match='-inv.fif'):
        read_inverse_operator(inv_badname)

    # make sure we can write and read
    inv_fname = op.join(tempdir, 'test-inv.fif')
    args = (10, 1. / 9., 'dSPM')
    inv_prep = prepare_inverse_operator(inverse_operator, *args)
    write_inverse_operator(inv_fname, inv_prep)
    inv_read = read_inverse_operator(inv_fname)
    _compare(inverse_operator, inv_read)
    inv_read_prep = prepare_inverse_operator(inv_read, *args)
    _compare(inv_prep, inv_read_prep)
    inv_prep_prep = prepare_inverse_operator(inv_prep, *args)
    _compare(inv_prep, inv_prep_prep)
Example #23
def test_version_mismatch_file():

    testfile = str(get_test_data_path('version_mismatch.fits'))

    with pytest.warns(None) as w:
        with asdf.open(testfile,
                ignore_version_mismatch=False) as fits_handle:
            assert fits_handle.tree['a'] == complex(0j)
    # This is the warning that we expect from opening the FITS file
    assert len(w) == 1, display_warnings(w)
    assert str(w[0].message) == (
        "'tag:stsci.edu:asdf/core/complex' with version 7.0.0 found in file "
        "'{}', but latest supported version is 1.0.0".format(testfile))

    # Make sure warning does not occur when warning is ignored (default)
    with pytest.warns(None) as w:
        with asdf.open(testfile) as fits_handle:
            assert fits_handle.tree['a'] == complex(0j)
    assert len(w) == 0, display_warnings(w)

    with pytest.warns(None) as w:
        with fits_embed.AsdfInFits.open(testfile,
                ignore_version_mismatch=False) as fits_handle:
            assert fits_handle.tree['a'] == complex(0j)
    assert len(w) == 1
    assert str(w[0].message) == (
        "'tag:stsci.edu:asdf/core/complex' with version 7.0.0 found in file "
        "'{}', but latest supported version is 1.0.0".format(testfile))

    # Make sure warning does not occur when warning is ignored (default)
    with pytest.warns(None) as w:
        with fits_embed.AsdfInFits.open(testfile) as fits_handle:
            assert fits_handle.tree['a'] == complex(0j)
    assert len(w) == 0, display_warnings(w)
Example #24
def test_compression_args():

    z = create(100, compression='zlib', compression_opts=9)
    assert isinstance(z, Array)
    assert 'zlib' == z.compressor.codec_id
    assert 9 == z.compressor.level

    # 'compressor' overrides 'compression'
    z = create(100, compressor=Zlib(9), compression='bz2', compression_opts=1)
    assert isinstance(z, Array)
    assert 'zlib' == z.compressor.codec_id
    assert 9 == z.compressor.level

    # 'compressor' ignores 'compression_opts'
    z = create(100, compressor=Zlib(9), compression_opts=1)
    assert isinstance(z, Array)
    assert 'zlib' == z.compressor.codec_id
    assert 9 == z.compressor.level

    with pytest.warns(UserWarning):
        # 'compressor' overrides 'compression'
        create(100, compressor=Zlib(9), compression='bz2', compression_opts=1)
    with pytest.warns(UserWarning):
        # 'compressor' ignores 'compression_opts'
        create(100, compressor=Zlib(9), compression_opts=1)
Example #25
def test_deprecated_rfc6979_signature():
    with pytest.warns(CryptographyDeprecationWarning):
        sig = encode_rfc6979_signature(1, 1)
    assert sig == b"0\x06\x02\x01\x01\x02\x01\x01"
    with pytest.warns(CryptographyDeprecationWarning):
        decoded = decode_rfc6979_signature(sig)
    assert decoded == (1, 1)
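Note: deprecation checks like Example #25 can also use pytest.deprecated_call, which accepts both DeprecationWarning and PendingDeprecationWarning; a minimal sketch with a hypothetical old_api():

import warnings

import pytest


def old_api():
    # hypothetical deprecated function
    warnings.warn("use new_api() instead", DeprecationWarning)
    return 42


def test_old_api_is_deprecated():
    with pytest.deprecated_call():
        assert old_api() == 42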
Example #26
def test_pairwise_boolean_distance(metric):
    # test that we convert to boolean arrays for boolean distances
    rng = np.random.RandomState(0)
    X = rng.randn(5, 4)
    Y = X.copy()
    Y[0, 0] = 1 - Y[0, 0]

    # ignore conversion to boolean in pairwise_distances
    with ignore_warnings(category=DataConversionWarning):
        for Z in [Y, None]:
            res = pairwise_distances(X, Z, metric=metric)
            res[np.isnan(res)] = 0
            assert np.sum(res != 0) == 0

    # non-boolean arrays are converted to boolean for boolean
    # distance metrics with a data conversion warning
    msg = "Data was converted to boolean for metric %s" % metric
    with pytest.warns(DataConversionWarning, match=msg):
        pairwise_distances(X, metric=metric)

    # Check that the warning is raised if X is boolean by Y is not boolean:
    with pytest.warns(DataConversionWarning, match=msg):
        pairwise_distances(X.astype(bool), Y=Y, metric=metric)

    # Check that no warning is raised if X is already boolean and Y is None:
    with pytest.warns(None) as records:
        pairwise_distances(X.astype(bool), metric=metric)
    assert len(records) == 0
Example #27
def test_old_input_deprecation_warning():
    with nengo.Network():
        c = nengo.networks.CircularConvolution(n_neurons=10, dimensions=1)
        with pytest.warns(DeprecationWarning):
            assert c.A is c.input_a
        with pytest.warns(DeprecationWarning):
            assert c.B is c.input_b
Example #28
    def test_order_after_filter_does_not_warn_on_absent_keys(self):
        with pytest.warns(None) as warnings:
            sources = self.flourish.sources.order_by('type')
            assert type(sources) == Flourish
            assert len(sources) == 6
            assert len(warnings) == 1
            assert (
                str(warnings[0].message) ==
                'sorting sources by "type" failed: '
                'not all sources have that attribute'
            )

        with pytest.warns(None) as warnings:
            sources = self.flourish.sources.filter(type='post')
            assert len(sources) == 5
            sources = sources.order_by('type', 'published')
            assert type(sources) == Flourish
            assert len(sources) == 5
            assert len(warnings) == 0
            assert [
                    'series/part-one',
                    'series/part-two',
                    'thing-one',
                    'thing-two',
                    'series/part-three',
                ] == [source.slug for source in sources]
Example #29
def test_tokenize_dense_sparse_array(cls_name):
    rng = np.random.RandomState(1234)

    with pytest.warns(None):
        # ignore scipy.sparse.SparseEfficiencyWarning
        a = sp.rand(10, 10000, random_state=rng).asformat(cls_name)
    b = a.copy()

    assert tokenize(a) == tokenize(b)

    # modifying the data values
    if hasattr(b, 'data'):
        b.data[:10] = 1
    elif cls_name == 'dok':
        b[3, 3] = 1
    else:
        raise ValueError

    assert tokenize(a) != tokenize(b)

    # modifying the data indices
    with pytest.warns(None):
        b = a.copy().asformat('coo')
        b.row[:10] = np.arange(10)
        b = b.asformat(cls_name)
    assert tokenize(a) != tokenize(b)
Example #30
    def test_deprecations(self):
        with pytest.warns(PendingDeprecationWarning) as record:
            @api_view(["GET"], exclude_from_schema=True)
            def view(request):
                pass

        assert len(record) == 1
        assert str(record[0].message) == (
            "The `exclude_from_schema` argument to `api_view` is pending "
            "deprecation. Use the `schema` decorator instead, passing `None`."
        )

        class OldFashionedExcludedView(APIView):
            exclude_from_schema = True

            def get(self, request, *args, **kwargs):
                pass

        patterns = [
            url('^excluded-old-fashioned/$', OldFashionedExcludedView.as_view()),
        ]

        inspector = EndpointEnumerator(patterns)
        with pytest.warns(PendingDeprecationWarning) as record:
            inspector.get_api_endpoints()

        assert len(record) == 1
        assert str(record[0].message) == (
            "The `OldFashionedExcludedView.exclude_from_schema` attribute is "
            "pending deprecation. Set `schema = None` instead."
        )
Example #31
def test_discard_failing_expectations():
    df = ge.dataset.PandasDataset(
        {
            "A": [1, 2, 3, 4],
            "B": [5, 6, 7, 8],
            "C": ["a", "b", "c", "d"],
            "D": ["e", "f", "g", "h"],
        },
        profiler=ge.profile.ColumnsExistProfiler,
    )

    # Put some simple expectations on the data frame
    df.expect_column_values_to_be_in_set("A", [1, 2, 3, 4])
    df.expect_column_values_to_be_in_set("B", [5, 6, 7, 8])
    df.expect_column_values_to_be_in_set("C", ["a", "b", "c", "d"])
    df.expect_column_values_to_be_in_set("D", ["e", "f", "g", "h"])

    exp1 = [
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "A"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "B"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "C"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "D"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "B", "value_set": [5, 6, 7, 8]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "C", "value_set": ["a", "b", "c", "d"]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "D", "value_set": ["e", "f", "g", "h"]},
        ),
    ]

    sub1 = df[:3]

    sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df[1:2]
    sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df[:-1]
    sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df[-1:]
    sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df[["A", "D"]]
    exp1 = [
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "A"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "D"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "D", "value_set": ["e", "f", "g", "h"]},
        ),
    ]
    with pytest.warns(UserWarning, match=r"Removed \d expectations that were 'False'"):
        sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df[["A"]]
    exp1 = [
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "A"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
        ),
    ]
    with pytest.warns(UserWarning, match=r"Removed \d expectations that were 'False'"):
        sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df.iloc[:3, 1:4]
    exp1 = [
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "B"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "C"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "D"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "B", "value_set": [5, 6, 7, 8]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "C", "value_set": ["a", "b", "c", "d"]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "D", "value_set": ["e", "f", "g", "h"]},
        ),
    ]
    with pytest.warns(UserWarning, match=r"Removed \d expectations that were 'False'"):
        sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1

    sub1 = df.loc[0:, "A":"B"]
    exp1 = [
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "A"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_to_exist", kwargs={"column": "B"}
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "A", "value_set": [1, 2, 3, 4]},
        ),
        ExpectationConfiguration(
            expectation_type="expect_column_values_to_be_in_set",
            kwargs={"column": "B", "value_set": [5, 6, 7, 8]},
        ),
    ]
    with pytest.warns(UserWarning, match=r"Removed \d expectations that were 'False'"):
        sub1.discard_failing_expectations()
    assert sub1.find_expectations() == exp1
Example #32
    def test_area_crs_warn(self):
        with pytest.warns(UserWarning, match="Geometry is in a geographic CRS"):
            self.g4.area
Example #33
    def test_intersection(self):
        self._test_binary_topological("intersection", self.t1, self.g1, self.g2)
        with pytest.warns(UserWarning, match="The indices .+ different"):
            self._test_binary_topological(
                "intersection", self.all_none, self.g1, self.empty
            )
Example #34
def test_DivergingNorm_deprecated():
    with pytest.warns(cbook.MatplotlibDeprecationWarning):
        norm = mcolors.DivergingNorm(vcenter=0)
Example #35
def test_warning_dewpoint_rh():
    """Test that warning is raised for >120% RH."""
    with pytest.warns(UserWarning):
        dewpoint_rh(10.6 * units.degC, 50)
Example #36
def test_read_file_privacy(tmpdir, df_nybb):
    with pytest.warns(DeprecationWarning):
        geopandas.io.file.read_file(geopandas.datasets.get_path("nybb"))
Example #37
def test_to_file_privacy(tmpdir, df_nybb):
    tempfilename = os.path.join(str(tmpdir), "test.shp")
    with pytest.warns(DeprecationWarning):
        geopandas.io.file.to_file(df_nybb, tempfilename)
Example #38
def test_warn_external_frame_embedded_python():
    with patch.object(cbook, "sys") as mock_sys:
        mock_sys._getframe = Mock(return_value=None)
        with pytest.warns(UserWarning, match=r"\Adummy\Z"):
            cbook._warn_external("dummy")
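Note: the match argument is applied with re.search, so the \A and \Z anchors above force the whole message to match; a minimal sketch:

import warnings

import pytest


def test_exact_warning_message():
    # \A...\Z pins the regex to the entire warning message
    with pytest.warns(UserWarning, match=r"\Aexact text\Z"):
        warnings.warn("exact text", UserWarning)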
Example #39
def test___setitem__(data):
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    modin_df.__setitem__(modin_df.columns[-1], 1)
    pandas_df.__setitem__(pandas_df.columns[-1], 1)
    df_equals(modin_df, pandas_df)

    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    modin_df[modin_df.columns[-1]] = pd.DataFrame(
        modin_df[modin_df.columns[0]])
    pandas_df[pandas_df.columns[-1]] = pandas.DataFrame(
        pandas_df[pandas_df.columns[0]])
    df_equals(modin_df, pandas_df)

    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)

    rows = len(modin_df)
    arr = np.arange(rows * 2).reshape(-1, 2)
    modin_df[modin_df.columns[-1]] = arr
    pandas_df[pandas_df.columns[-1]] = arr
    df_equals(pandas_df, modin_df)

    with pytest.raises(ValueError, match=r"Wrong number of items passed"):
        modin_df["___NON EXISTENT COLUMN"] = arr

    modin_df[modin_df.columns[0]] = np.arange(len(modin_df))
    pandas_df[pandas_df.columns[0]] = np.arange(len(pandas_df))
    df_equals(modin_df, pandas_df)

    modin_df = pd.DataFrame(columns=modin_df.columns)
    pandas_df = pandas.DataFrame(columns=pandas_df.columns)

    for col in modin_df.columns:
        modin_df[col] = np.arange(1000)

    for col in pandas_df.columns:
        pandas_df[col] = np.arange(1000)

    df_equals(modin_df, pandas_df)

    # Test series assignment to column
    modin_df = pd.DataFrame(columns=modin_df.columns)
    pandas_df = pandas.DataFrame(columns=pandas_df.columns)
    modin_df[modin_df.columns[-1]] = modin_df[modin_df.columns[0]]
    pandas_df[pandas_df.columns[-1]] = pandas_df[pandas_df.columns[0]]
    df_equals(modin_df, pandas_df)

    if sys.version_info[:2] != (3, 6):
        # This test doesn't work correctly on Python 3.6
        # Test 2d ndarray assignment to column
        modin_df = pd.DataFrame(data)
        pandas_df = pandas.DataFrame(data)
        modin_df["new_col"] = modin_df[[modin_df.columns[0]]].values
        pandas_df["new_col"] = pandas_df[[pandas_df.columns[0]]].values
        df_equals(modin_df, pandas_df)
        assert isinstance(modin_df["new_col"][0],
                          type(pandas_df["new_col"][0]))

    # Transpose test
    modin_df = pd.DataFrame(data).T
    pandas_df = pandas.DataFrame(data).T

    # We default to pandas on non-string column names
    if not all(isinstance(c, str) for c in modin_df.columns):
        with pytest.warns(UserWarning):
            modin_df[modin_df.columns[0]] = 0
    else:
        modin_df[modin_df.columns[0]] = 0

    pandas_df[pandas_df.columns[0]] = 0

    df_equals(modin_df, pandas_df)

    modin_df.columns = [str(i) for i in modin_df.columns]
    pandas_df.columns = [str(i) for i in pandas_df.columns]

    modin_df[modin_df.columns[0]] = 0
    pandas_df[pandas_df.columns[0]] = 0

    df_equals(modin_df, pandas_df)

    modin_df[modin_df.columns[0]][modin_df.index[0]] = 12345
    pandas_df[pandas_df.columns[0]][pandas_df.index[0]] = 12345

    df_equals(modin_df, pandas_df)
Example #40
def test_reregistration_raises_warning(config):
    with pytest.warns(UserWarning, match='.*Double registration of the same service.*'):
        config.register_di_service(InterfacedService, scope='global')
Example #41
def test_dataframe_iterator(all_test_images, tmpdir):
    num_classes = 2

    # save the images in the tmpdir
    count = 0
    filenames = []
    filepaths = []
    filenames_without = []
    for test_images in all_test_images:
        for im in test_images:
            filename = "image-{}.png".format(count)
            filename_without = "image-{}".format(count)
            filenames.append(filename)
            filepaths.append(os.path.join(str(tmpdir), filename))
            filenames_without.append(filename_without)
            im.save(str(tmpdir / filename))
            count += 1

    df = pd.DataFrame({
        "filename": filenames,
        "class": [str(random.randint(0, 1)) for _ in filenames],
        "filepaths": filepaths
    })

    # create iterator
    iterator = dataframe_iterator.DataFrameIterator(df, str(tmpdir))
    batch = next(iterator)
    assert len(batch) == 2
    assert isinstance(batch[0], np.ndarray)
    assert isinstance(batch[1], np.ndarray)
    generator = image_data_generator.ImageDataGenerator()
    df_iterator = generator.flow_from_dataframe(df, x_col='filepaths')
    df_iterator_dir = generator.flow_from_dataframe(df, str(tmpdir))
    df_sparse_iterator = generator.flow_from_dataframe(df, str(tmpdir),
                                                       class_mode="sparse")
    assert not np.isnan(df_sparse_iterator.classes).any()
    # check number of classes and images
    assert len(df_iterator.class_indices) == num_classes
    assert len(df_iterator.classes) == count
    assert set(df_iterator.filenames) == set(filepaths)
    assert len(df_iterator_dir.class_indices) == num_classes
    assert len(df_iterator_dir.classes) == count
    assert set(df_iterator_dir.filenames) == set(filenames)
    # test without shuffle
    _, batch_y = next(generator.flow_from_dataframe(df, str(tmpdir),
                                                    shuffle=False,
                                                    class_mode="sparse"))
    assert (batch_y == df['class'].astype('float')[:len(batch_y)]).all()
    # Test invalid use cases
    with pytest.raises(ValueError):
        generator.flow_from_dataframe(df, str(tmpdir), color_mode='cmyk')
    with pytest.raises(ValueError):
        generator.flow_from_dataframe(df, str(tmpdir), class_mode='output')
    with pytest.warns(DeprecationWarning):
        generator.flow_from_dataframe(df, str(tmpdir), has_ext=True)
    with pytest.warns(DeprecationWarning):
        generator.flow_from_dataframe(df, str(tmpdir), has_ext=False)

    def preprocessing_function(x):
        """This will fail if not provided by a Numpy array.
        Note: This is made to enforce backward compatibility.
        """

        assert x.shape == (26, 26, 3)
        assert type(x) is np.ndarray

        return np.zeros_like(x)

    # Test usage as Sequence
    generator = image_data_generator.ImageDataGenerator(
        preprocessing_function=preprocessing_function)
    dir_seq = generator.flow_from_dataframe(df, str(tmpdir),
                                            target_size=(26, 26),
                                            color_mode='rgb',
                                            batch_size=3,
                                            class_mode='categorical')
    assert len(dir_seq) == np.ceil(count / 3)
    x1, y1 = dir_seq[1]
    assert x1.shape == (3, 26, 26, 3)
    assert y1.shape == (3, num_classes)
    x1, y1 = dir_seq[5]
    assert (x1 == 0).all()

    with pytest.raises(ValueError):
        x1, y1 = dir_seq[9]
Example #42
def test_custom_generated_fields():
    """Test fields.generated fields."""

    with pytest.warns(RuntimeWarning):

        def serialize_func(obj, ctx):
            return ctx.get('func-foo', obj.get('func-bar', missing))

        def deserialize_func(value, ctx, data):
            return ctx.get('func-foo', data.get('func-bar', missing))

        class GeneratedFieldsSchema(StrictKeysMixin):
            """Test schema."""

            gen_function = GenFunction(
                serialize=serialize_func,
                deserialize=deserialize_func,
            )

            gen_method = GenMethod(
                serialize='_serialize_gen_method',
                deserialize='_deserialize_gen_method',
                missing='raises-warning',
            )

            def _serialize_gen_method(self, obj):
                # "meth-foo" from context or "meth-bar" from the object
                return self.context.get('meth-foo',
                                        obj.get('meth-bar', missing))

            def _deserialize_gen_method(self, value, data):
                # "meth-foo" from context or "meth-bar" from the data
                return self.context.get('meth-foo',
                                        data.get('meth-bar', missing))

    ctx = {
        'func-foo': 'ctx-func-value',
        'meth-foo': 'ctx-meth-value',
    }
    data = {
        'func-bar': 'data-func-value',
        'meth-bar': 'data-meth-value',
        'gen_function': 'original-func-value',
        'gen_method': 'original-meth-value',
    }

    # No context, no data
    assert GeneratedFieldsSchema().load({}).data == {}
    assert GeneratedFieldsSchema().dump({}).data == {}

    # Only context
    assert GeneratedFieldsSchema(context=ctx).load({}).data == {
        'gen_function': 'ctx-func-value',
        'gen_method': 'ctx-meth-value',
    }
    assert GeneratedFieldsSchema(context=ctx).dump({}).data == {
        'gen_function': 'ctx-func-value',
        'gen_method': 'ctx-meth-value',
    }

    # Only data
    assert GeneratedFieldsSchema().load(data).data == {
        'gen_function': 'data-func-value',
        'gen_method': 'data-meth-value',
    }
    assert GeneratedFieldsSchema().dump(data).data == {
        'gen_function': 'data-func-value',
        'gen_method': 'data-meth-value',
    }

    # Context and data
    assert GeneratedFieldsSchema(context=ctx).load(data).data == {
        'gen_function': 'ctx-func-value',
        'gen_method': 'ctx-meth-value',
    }
    assert GeneratedFieldsSchema(context=ctx).dump(data).data == {
        'gen_function': 'ctx-func-value',
        'gen_method': 'ctx-meth-value',
    }
Example #43
    def test_context_manager(self):
        with pytest.warns(None) as record:
            with Image.open("Tests/images/multipage.tiff") as im:
                im.load()

        assert not record
Example #44
def test_client_response_url_obj_deprecated():
    response = ClientResponse('get', URL('http://fake-host.org/'))
    with pytest.warns(DeprecationWarning):
        response.url_obj
Example #45
    def test_bad_exif(self):
        with Image.open("Tests/images/hopper_bad_exif.jpg") as i:
            # Should not raise struct.error.
            pytest.warns(UserWarning, i._getexif)
Example #46
    def test_charge_neutrality_warn(self, benzene):
        benzene[0].charge = 0.25
        with pytest.warns(UserWarning):
            benzene.save('charge-test.mol2')
Example #47
def test_set_eeg_reference():
    """Test rereference eeg data."""
    raw = read_raw_fif(fif_fname, preload=True)
    raw.info['projs'] = []

    # Test setting an average reference projection
    assert (not _has_eeg_average_ref_proj(raw.info['projs']))
    reref, ref_data = set_eeg_reference(raw, projection=True)
    assert (_has_eeg_average_ref_proj(reref.info['projs']))
    assert (not reref.info['projs'][0]['active'])
    assert (ref_data is None)
    reref.apply_proj()
    eeg_chans = [
        raw.ch_names[ch] for ch in pick_types(raw.info, meg=False, eeg=True)
    ]
    _test_reference(raw, reref, ref_data,
                    [ch for ch in eeg_chans if ch not in raw.info['bads']])

    # Test setting an average reference when one was already present
    with pytest.warns(RuntimeWarning, match='untouched'):
        reref, ref_data = set_eeg_reference(raw, copy=False, projection=True)
    assert ref_data is None

    # Test setting an average reference on non-preloaded data
    raw_nopreload = read_raw_fif(fif_fname, preload=False)
    raw_nopreload.info['projs'] = []
    reref, ref_data = set_eeg_reference(raw_nopreload, projection=True)
    assert _has_eeg_average_ref_proj(reref.info['projs'])
    assert not reref.info['projs'][0]['active']

    # Rereference raw data by creating a copy of original data
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
    assert reref.info['custom_ref_applied']
    _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])

    # Test that data is modified in place when copy=False
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
                                        copy=False)
    assert raw is reref

    # Test moving from custom to average reference
    reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
    reref, _ = set_eeg_reference(reref, projection=True)
    assert _has_eeg_average_ref_proj(reref.info['projs'])
    assert not reref.info['custom_ref_applied']

    # When creating an average reference fails, make sure the
    # custom_ref_applied flag remains untouched.
    reref = raw.copy()
    reref.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_ON
    reref.pick_types(meg=True, eeg=False)  # Cause making average ref fail
    pytest.raises(ValueError, set_eeg_reference, reref, projection=True)
    assert reref.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON

    # Test moving from average to custom reference
    reref, ref_data = set_eeg_reference(raw, projection=True)
    reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002'])
    assert not _has_eeg_average_ref_proj(reref.info['projs'])
    assert len(reref.info['projs']) == 0
    assert reref.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_ON

    # Test that disabling the reference does not change the data
    assert _has_eeg_average_ref_proj(raw.info['projs'])
    reref, _ = set_eeg_reference(raw, [])
    assert_array_equal(raw._data, reref._data)
    assert not _has_eeg_average_ref_proj(reref.info['projs'])

    # make sure ref_channels=[] removes average reference projectors
    assert _has_eeg_average_ref_proj(raw.info['projs'])
    reref, _ = set_eeg_reference(raw, [])
    assert (not _has_eeg_average_ref_proj(reref.info['projs']))

    # Test that average reference gives identical results when calculated
    # via SSP projection (projection=True) or directly (projection=False)
    raw.info['projs'] = []
    reref_1, _ = set_eeg_reference(raw.copy(), projection=True)
    reref_1.apply_proj()
    reref_2, _ = set_eeg_reference(raw.copy(), projection=False)
    assert_allclose(reref_1._data, reref_2._data, rtol=1e-6, atol=1e-15)

    # Test average reference without projection
    reref, ref_data = set_eeg_reference(raw.copy(),
                                        ref_channels="average",
                                        projection=False)
    _test_reference(raw, reref, ref_data, eeg_chans)

    with pytest.raises(ValueError, match='supported for ref_channels="averag'):
        set_eeg_reference(raw, [], True, True)
    with pytest.raises(ValueError, match='supported for ref_channels="averag'):
        set_eeg_reference(raw, ['EEG 001'], True, True)
Example #48
    def test_unclosed_file(self):
        def open():
            im = Image.open("Tests/images/multipage.tiff")
            im.load()

        pytest.warns(ResourceWarning, open)
Example #49
def test_read_table_doesnt_warn(datadir, use_legacy_dataset):
    with pytest.warns(None) as record:
        pq.read_table(datadir / 'v0.7.1.parquet',
                      use_legacy_dataset=use_legacy_dataset)

    assert len(record) == 0
Example #50
def test_add_reference():
    """Test adding a reference."""
    raw = read_raw_fif(fif_fname, preload=True)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # check if channel already exists
    pytest.raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])
    # add reference channel to Raw
    raw_ref = add_reference_channels(raw, 'Ref', copy=True)
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # for Neuromag fif's, the reference electrode location is placed in
    # elements [3:6] of each "data" electrode location
    assert_allclose(raw.info['chs'][-1]['loc'][:3],
                    raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)

    ref_idx = raw.ch_names.index('Ref')
    ref_data, _ = raw[ref_idx]
    assert_array_equal(ref_data, 0)

    # add reference channel to Raw when no digitization points exist
    raw = read_raw_fif(fif_fname).crop(0, 1).load_data()
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    del raw.info['dig']

    raw_ref = add_reference_channels(raw, 'Ref', copy=True)

    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    _check_channel_names(raw_ref, 'Ref')

    orig_nchan = raw.info['nchan']
    raw = add_reference_channels(raw, 'Ref', copy=False)
    assert_array_equal(raw._data, raw_ref._data)
    assert_equal(raw.info['nchan'], orig_nchan + 1)
    _check_channel_names(raw, 'Ref')

    # Test adding an existing channel as reference channel
    pytest.raises(ValueError, add_reference_channels, raw,
                  raw.info['ch_names'][0])

    # add two reference channels to Raw
    raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
    _check_channel_names(raw_ref, ['M1', 'M2'])
    assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
    assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
    assert_array_equal(raw_ref._data[-2:, :], 0)

    raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
    _check_channel_names(raw, ['M1', 'M2'])
    ref_idx = raw.ch_names.index('M1')
    ref_idy = raw.ch_names.index('M2')
    ref_data, _ = raw[[ref_idx, ref_idy]]
    assert_array_equal(ref_data, 0)

    # add reference channel to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True)
    # default: proj=True, after which adding a Ref channel is prohibited
    pytest.raises(RuntimeError, add_reference_channels, epochs, 'Ref')

    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)

    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
    _check_channel_names(epochs_ref, 'Ref')
    ref_idx = epochs_ref.ch_names.index('Ref')
    ref_data = epochs_ref.get_data()[:, ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])
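
    # Hedged sketch of why 'delayed' works: the projector is attached but
    # left unapplied (active=False, per standard MNE proj layout), which is
    # what permits add_reference_channels above.
    assert all(not proj['active'] for proj in epochs.info['projs'])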

    # add two reference channels to epochs
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    with pytest.warns(RuntimeWarning, match='reference channels are ignored'):
        epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
    assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
    _check_channel_names(epochs_ref, ['M1', 'M2'])
    ref_idx = epochs_ref.ch_names.index('M1')
    ref_idy = epochs_ref.ch_names.index('M2')
    assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')
    assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')
    ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
    assert_array_equal(epochs.get_data()[:, picks_eeg, :],
                       epochs_ref.get_data()[:, picks_eeg, :])

    # add reference channel to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
    _check_channel_names(evoked_ref, 'Ref')
    ref_idx = evoked_ref.ch_names.index('Ref')
    ref_data = evoked_ref.data[ref_idx, :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # add two reference channels to evoked
    raw = read_raw_fif(fif_fname, preload=True)
    events = read_events(eve_fname)
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)
    # create epochs in delayed mode, allowing removal of CAR when re-referencing
    epochs = Epochs(raw,
                    events=events,
                    event_id=1,
                    tmin=-0.2,
                    tmax=0.5,
                    picks=picks_eeg,
                    preload=True,
                    proj='delayed')
    evoked = epochs.average()
    with pytest.warns(RuntimeWarning, match='reference channels are ignored'):
        evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
    assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
    _check_channel_names(evoked_ref, ['M1', 'M2'])
    ref_idx = evoked_ref.ch_names.index('M1')
    ref_idy = evoked_ref.ch_names.index('M2')
    ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
    assert_array_equal(ref_data, 0)
    picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
    assert_array_equal(evoked.data[picks_eeg, :],
                       evoked_ref.data[picks_eeg, :])

    # Test invalid inputs
    raw = read_raw_fif(fif_fname, preload=False)
    with pytest.raises(RuntimeError, match='loaded'):
        add_reference_channels(raw, ['Ref'])
    raw.load_data()
    with pytest.raises(ValueError, match='Channel.*already.*'):
        add_reference_channels(raw, raw.ch_names[:1])
    with pytest.raises(TypeError, match='instance of'):
        add_reference_channels(raw, 1)
Example #51
0
def test_arg_string_kwargs_are_bad_repr_safe():
    with pytest.warns(HypothesisDeprecationWarning):
        assert arg_string(varargs, (), {u'x': Frosty}) == u'x=☃'
Example #52
0
def test_no_resource_warning_with_context_manager():
    with pytest.warns(None) as record:  # ensure no warnings are raised
        with tradeapi.REST('key-id', 'secret-key', api_version='v1') as api:
            assert api
    assert not record
Example #53
0
def test_sampling_snowmen():
    with pytest.warns(HypothesisDeprecationWarning):
        assert unicode_safe_repr(st.sampled_from((
            Frosty, u'hi'))) == u'sampled_from((☃, %s))' % (repr(u'hi'),)
Example #54
0
def test_submodular_pick():
    """Tests :func:`fatf.transparency.models.submodular_pick`."""
    fatf.setup_random_seed()

    explanations, explanation_ind = ftms.submodular_pick(
        NUMERICAL_NP_ARRAY, explain_instance_a, explanations_number=2)
    assert explanation_ind == [0, 2]
    assert explanations == [EXPLAINERS[0], EXPLAINERS[2]]

    explanations, explanation_ind = ftms.submodular_pick(
        NUMERICAL_NP_ARRAY, explain_instance_b, explanations_number=2)
    assert explanation_ind == [0, 1]
    assert explanations == [EXPLAINERS[3], EXPLAINERS[2]]

    msg = ('sample_size is larger than the number of samples in the data set. '
           'The whole dataset will be used.')
    with pytest.warns(UserWarning) as warning:
        explanations, explanation_ind = ftms.submodular_pick(
            NUMERICAL_NP_ARRAY,
            explain_instance_a,
            sample_size=100,
            explanations_number=1)
    assert len(warning) == 1
    assert str(warning[0].message) == msg
    assert explanation_ind == [0]
    assert explanations == [EXPLAINERS[0]]
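
    # Equivalent, terser sketch (assumes `import re`): pytest.warns accepts
    # a match= regex, so the exact-message check above can fold into the
    # context manager; re.escape is needed because the message contains
    # regex metacharacters. Note that this form does not assert that
    # exactly one warning fired.
    with pytest.warns(UserWarning, match=re.escape(msg)):
        ftms.submodular_pick(NUMERICAL_NP_ARRAY, explain_instance_a,
                             sample_size=100, explanations_number=1)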

    explanations, explanation_ind = ftms.submodular_pick(
        NUMERICAL_NP_ARRAY,
        explain_instance_a,
        sample_size=1,
        explanations_number=1)
    assert explanation_ind == [1]
    assert explanations == [EXPLAINERS[1]]

    explanations, explanation_ind = ftms.submodular_pick(
        NUMERICAL_NP_ARRAY,
        explain_instance_a,
        sample_size=0,
        explanations_number=0)
    assert explanation_ind == [0, 2, 1, 3]
    assert explanations == [
        EXPLAINERS[0], EXPLAINERS[2], EXPLAINERS[1], EXPLAINERS[3]
    ]

    explanations, explanation_ind = ftms.submodular_pick(
        NUMERICAL_NP_ARRAY,
        explain_instance_a,
        sample_size=2,
        explanations_number=0)
    assert explanation_ind == [3, 1]
    assert explanations == [EXPLAINERS[3], EXPLAINERS[1]]

    msg = ('The number of explanations cannot be larger than '
           'the number of instances (rows) in the data set.')
    with pytest.warns(UserWarning) as warning:
        explanations, explanation_ind = ftms.submodular_pick(
            NUMERICAL_NP_ARRAY, explain_instance_a, 0, 222)
    assert len(warning) == 1
    assert str(warning[0].message) == msg
    assert explanation_ind == [0, 2, 1, 3]
    assert explanations == [
        EXPLAINERS[0], EXPLAINERS[2], EXPLAINERS[1], EXPLAINERS[3]
    ]
Example #55
0
def test_regression_binary2(tmpdir):
    # W39: Bit values can not be masked
    with pytest.warns(W39):
        _test_regression(tmpdir, False, 2)
Example #56
0
def test_arg_strings_are_bad_repr_safe():
    with pytest.warns(HypothesisDeprecationWarning):
        assert arg_string(varargs, (Frosty,), {}) == u'☃'
Example #57
0
def test_plot_alignment(tmpdir, renderer):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmpdir)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{
        'coord_frame': 5,
        'ident': 1,
        'kind': 1,
        'r': [-0.08061612, -0.02908875, -0.04131077]
    }, {
        'coord_frame': 5,
        'ident': 2,
        'kind': 1,
        'r': [0.00146763, 0.08506715, -0.03483611]
    }, {
        'coord_frame': 5,
        'ident': 3,
        'kind': 1,
        'r': [0.08436285, -0.02850276, -0.04127743]
    }]
    write_dig(fiducials_path, fid, 5)

    renderer._close_all()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    bti = read_raw_bti(pdf_fname,
                       config_fname,
                       hs_fname,
                       convert=True,
                       preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        fig = plot_alignment(info,
                             read_trans(trans_fname),
                             subject='sample',
                             subjects_dir=subjects_dir,
                             meg=meg)
        rend = renderer._Renderer(fig=fig)
        rend.close()
    # KIT ref sensor coil def is defined
    renderer._close_all()
    info = infos['Neuromag']
    pytest.raises(TypeError,
                  plot_alignment,
                  'foo',
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir)
    pytest.raises(OSError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='sample',
                  subjects_dir=subjects_dir,
                  src='foo')
    pytest.raises(ValueError,
                  plot_alignment,
                  info,
                  trans_fname,
                  subject='fsaverage',
                  subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir,
                    head=True,
                    skull=True,
                    brain='white')
    renderer._close_all()
    # no-head version
    renderer._close_all()
    # all coord frames
    pytest.raises(ValueError, plot_alignment, info)
    plot_alignment(info, surfaces=[])
    for coord_frame in ('meg', 'head', 'mri'):
        fig = plot_alignment(info,
                             meg=['helmet', 'sensors'],
                             dig=True,
                             coord_frame=coord_frame,
                             trans=Path(trans_fname),
                             subject='sample',
                             mri_fiducials=fiducials_path,
                             subjects_dir=subjects_dir,
                             src=src_fname)
    renderer._close_all()
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({
        'EEG 001': 'ecog',
        'EEG 002': 'seeg'
    })
    with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
        plot_alignment(evoked_eeg_ecog_seeg.info,
                       subject='sample',
                       trans=trans_fname,
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'],
                       ecog=True,
                       seeg=True)
    renderer._close_all()

    sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(
        op.join(subjects_dir, 'sample', 'bem',
                'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(
        info,
        subject='sample',
        eeg='projected',
        meg='helmet',
        bem=sphere,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg='helmet',
                   subjects_dir=subjects_dir,
                   eeg='projected',
                   bem=sphere,
                   surfaces=['head', 'brain'],
                   src=sample_src)
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=[],
                   subjects_dir=subjects_dir,
                   bem=bem_sol,
                   eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info,
                   trans_fname,
                   subject='sample',
                   meg=True,
                   subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'],
                   bem=bem_surfs)
    # single-layer BEM can still plot head surface
    assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
    bem_sol_homog = read_bem_solution(
        op.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem-sol.fif'))
    for use_bem in (bem_surfs[-1:], bem_sol_homog):
        with catch_logging() as log:
            plot_alignment(info,
                           trans_fname,
                           subject='sample',
                           meg=True,
                           subjects_dir=subjects_dir,
                           surfaces=['head', 'inner_skull'],
                           bem=use_bem,
                           verbose=True)
        log = log.getvalue()
        assert 'not find the surface for head in the provided BEM model' in log
    # sphere model
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(
        info,
        eeg='projected',
        meg='helmet',
        bem=sphere,
        src=src,
        dig=True,
        surfaces=['brain', 'inner_skull', 'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    # passing no info is permitted
    fig = plot_alignment(trans=trans_fname,
                         subject='sample',
                         meg=False,
                         coord_frame='mri',
                         subjects_dir=subjects_dir,
                         surfaces=['brain'],
                         bem=sphere,
                         show_axes=True)
    renderer._close_all()
    if renderer.get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(fig, mayavi.core.scene.Scene)

    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, [0])
    info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    with use_coil_def(coil_def_fname):
        plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)
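
    # The pattern above generalizes; a hedged helper sketch (coil_def_text
    # is a placeholder for a valid coil definition string):
    def _with_custom_coil_def(tmpdir_, coil_def_text, fn):
        fname = op.join(tmpdir_, 'custom_coil_def.dat')
        with open(fname, 'w') as fid_:
            fid_.write(coil_def_text)
        # use_coil_def scopes the extra definitions to the with-block only.
        with use_coil_def(fname):
            return fn()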

    # one-layer BEM with skull surfaces:
    with pytest.raises(ValueError, match='sphere conductor model must have'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['brain', 'head', 'inner_skull'],
                       bem=sphere)
    # wrong eeg value:
    with pytest.raises(ValueError, match='eeg must only contain'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       eeg='foo')
    # wrong meg value:
    with pytest.raises(ValueError, match='meg must only contain'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       meg='bar')
    # multiple brain surfaces:
    with pytest.raises(ValueError, match='Only one brain surface can be plot'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['white', 'pial'])
    with pytest.raises(TypeError, match='all entries in surfaces must be'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=[1])
    with pytest.raises(ValueError, match='Unknown surface type'):
        plot_alignment(info=info,
                       trans=trans_fname,
                       subject='sample',
                       subjects_dir=subjects_dir,
                       surfaces=['foo'])
    fwd_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
    fwd = read_forward_solution(fwd_fname)
    plot_alignment(subject='sample',
                   subjects_dir=subjects_dir,
                   trans=trans_fname,
                   fwd=fwd,
                   surfaces='white',
                   coord_frame='head')
    fwd = convert_forward_solution(fwd, force_fixed=True)
    plot_alignment(subject='sample',
                   subjects_dir=subjects_dir,
                   trans=trans_fname,
                   fwd=fwd,
                   surfaces='white',
                   coord_frame='head')

    # fNIRS
    info = read_raw_nirx(nirx_fname).info
    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
    log = log.getvalue()
    assert '26 fnirs locations' in log

    renderer._close_all()
Example #58
0
def test_just_frosty():
    with pytest.warns(HypothesisDeprecationWarning):
        assert unicode_safe_repr(st.just(Frosty)) == u'just(☃)'
Example #59
0
def test_rasterize_invalid_shapes():
    """Invalid shapes should raise an exception rather than be skipped."""
    with pytest.raises(ValueError) as ex, pytest.warns(ShapeSkipWarning):
        rasterize([{'foo': 'bar'}], out_shape=DEFAULT_SHAPE)

    assert 'No valid geometry objects found for rasterize' in str(ex.value)
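
    # For contrast, a hedged sketch of a valid call (geometry illustrative;
    # with the default identity transform, coordinates map directly to
    # pixel indices):
    square = {'type': 'Polygon',
              'coordinates': [[(0, 0), (0, 5), (5, 5), (5, 0), (0, 0)]]}
    out = rasterize([(square, 1)], out_shape=DEFAULT_SHAPE)
    assert out.sum() > 0  # some pixels were burned with value 1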
Example #60
0
def test_regression_python_based_parser(tmpdir):
    # W39: Bit values can not be masked
    with pytest.warns(W39):
        _test_regression(tmpdir, True)