def test_windowfunc_basics():
    for window_name, params in window_funcs:
        window = getattr(signal, window_name)
        with warnings.catch_warnings(record=True):  # window is not suitable...
            w1 = window(7, *params, sym=True)
            w2 = window(7, *params, sym=False)
            assert_array_almost_equal(w1, w2)

            # Check that functions run and output lengths are correct
            assert_equal(len(window(6, *params, sym=True)), 6)
            assert_equal(len(window(6, *params, sym=False)), 6)
            assert_equal(len(window(7, *params, sym=True)), 7)
            assert_equal(len(window(7, *params, sym=False)), 7)

            # Check invalid lengths
            assert_raises(ValueError, window, 5.5, *params)
            assert_raises(ValueError, window, -7, *params)

            # Check degenerate cases
            assert_array_equal(window(0, *params, sym=True), [])
            assert_array_equal(window(0, *params, sym=False), [])
            assert_array_equal(window(1, *params, sym=True), [1])
            assert_array_equal(window(1, *params, sym=False), [1])

            # Check dtype
            assert_(window(0, *params, sym=True).dtype == 'float')
            assert_(window(0, *params, sym=False).dtype == 'float')
            assert_(window(1, *params, sym=True).dtype == 'float')
            assert_(window(1, *params, sym=False).dtype == 'float')
            assert_(window(6, *params, sym=True).dtype == 'float')
            assert_(window(6, *params, sym=False).dtype == 'float')

def test_array_richcompare_legacy_weirdness(self):
    # It doesn't really work to use assert_deprecated here, b/c part of
    # the point of assert_deprecated is to check that when warnings are
    # set to "error" mode then the error is propagated -- which is good!
    # But here we are testing a bunch of code that is deprecated *because*
    # it has the habit of swallowing up errors and converting them into
    # different warnings. So assert_warns will have to be sufficient.
    assert_warns(FutureWarning, lambda: np.arange(2) == "a")
    assert_warns(FutureWarning, lambda: np.arange(2) != "a")
    # No warning for scalar comparisons
    with warnings.catch_warnings():
        warnings.filterwarnings("error")
        assert_(not (np.array(0) == "a"))
        assert_(np.array(0) != "a")
        assert_(not (np.int16(0) == "a"))
        assert_(np.int16(0) != "a")

    for arg1 in [np.asarray(0), np.int16(0)]:
        struct = np.zeros(2, dtype="i4,i4")
        for arg2 in [struct, "a"]:
            for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                if sys.version_info[0] >= 3:
                    # py3: record the warnings so the no-warning check below
                    # actually sees what was raised
                    with warnings.catch_warnings(record=True) as l:
                        warnings.filterwarnings("always")
                        assert_raises(TypeError, f, arg1, arg2)
                        assert_(not l)
                else:
                    # py2
                    assert_warns(DeprecationWarning, f, arg1, arg2)

def test_corrupted_data():
    import zlib
    for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
                       (zlib.error, 'corrupted_zlib_checksum.mat')]:
        with open(pjoin(test_data_path, fname), 'rb') as fp:
            rdr = MatFile5Reader(fp)
            assert_raises(exc, rdr.get_variables)

def test_renn_sample_wt_fit():
    """Test that an error is raised when sample is called before fitting."""
    # Create the object
    renn = RepeatedEditedNearestNeighbours(random_state=RND_SEED)
    assert_raises(RuntimeError, renn.sample, X, Y)

def test_TimeSeries():
    """Testing the initialization of the uniform time series object"""

    # Test initialization with duration:
    tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], duration=10)
    tseries2 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                             sampling_interval=1)
    npt.assert_equal(tseries1.time, tseries2.time)

    # downsampling:
    t1 = ts.UniformTime(length=8, sampling_rate=2)
    # duration is the same, but we're downsampling to 1Hz
    tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)
    # If you didn't explicitly provide the rate you want to downsample to,
    # that is an error:
    npt.assert_raises(ValueError, ts.TimeSeries,
                      dict(data=[1, 2, 3, 4], time=t1))

    tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)
    tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,
                             time_unit="ms")
    # you can specify the sampling_rate or the sampling_interval, to the same
    # effect, where specifying the sampling_interval is in the units of that
    # time-series:
    tseries4 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_interval=1,
                             time_unit="ms")
    npt.assert_equal(tseries4.time, tseries3.time)
    # The units you use shouldn't matter - time is time:
    tseries6 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_interval=0.001,
                             time_unit="s")
    npt.assert_equal(tseries6.time, tseries3.time)

    # And this too - perverse, but should be possible:
    tseries5 = ts.TimeSeries(data=[1, 2, 3, 4],
                             sampling_interval=ts.TimeArray(0.001,
                                                            time_unit="s"),
                             time_unit="ms")
    npt.assert_equal(tseries5.time, tseries3.time)

    # initializing with a UniformTime object:
    t = ts.UniformTime(length=3, sampling_rate=3)
    data = [1, 2, 3]
    tseries7 = ts.TimeSeries(data=data, time=t)
    npt.assert_equal(tseries7.data, data)

    data = [1, 2, 3, 4]
    # If the data is not the right length, that should throw an error:
    npt.assert_raises(ValueError, ts.TimeSeries, dict(data=data, time=t))

    # test basic arithmetic with TimeSeries
    tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)
    tseries2 = tseries1 + 1
    npt.assert_equal(tseries1.data + 1, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    tseries2 -= 1
    npt.assert_equal(tseries1.data, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    tseries2 = tseries1 * 2
    npt.assert_equal(tseries1.data * 2, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)
    tseries2 = tseries2 / 2
    npt.assert_equal(tseries1.data, tseries2.data)
    npt.assert_equal(tseries1.time, tseries2.time)

def test_invalid_seed():
    seed = np.ones((5, 5))
    mask = np.ones((5, 5))
    assert_raises(ValueError, reconstruction, seed * 2, mask,
                  method='dilation')
    assert_raises(ValueError, reconstruction, seed * 0.5, mask,
                  method='erosion')

def test_read_opts():
    # tests if read is seeing option sets, at initialization and after
    # initialization
    arr = np.arange(6).reshape(1, 6)
    stream = BytesIO()
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    back_dict = rdr.get_variables()
    rarr = back_dict['a']
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, squeeze_me=True)
    assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
    rdr.squeeze_me = False
    assert_array_equal(rarr, arr)
    rdr = MatFile5Reader(stream, byte_order=boc.native_code)
    assert_array_equal(rdr.get_variables()['a'], arr)
    # inverted byte code leads to error on read because of swapped
    # header etc
    rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
    assert_raises(Exception, rdr.get_variables)
    rdr.byte_order = boc.native_code
    assert_array_equal(rdr.get_variables()['a'], arr)
    arr = np.array(['a string'])
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': arr})
    rdr = MatFile5Reader(stream)
    assert_array_equal(rdr.get_variables()['a'], arr)
    rdr = MatFile5Reader(stream, chars_as_strings=False)
    carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
    assert_array_equal(rdr.get_variables()['a'], carr)
    rdr.chars_as_strings = True
    assert_array_equal(rdr.get_variables()['a'], arr)

def test_wrong_dimensions(self):
    x0 = 1.0
    assert_raises(RuntimeError, approx_derivative,
                  self.wrong_dimensions_fun, x0)
    f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
    assert_raises(ValueError, approx_derivative,
                  self.wrong_dimensions_fun, x0, f0=f0)

def test_pad_too_many_axes(self):
    arr = np.arange(30).reshape(5, 6)

    # Attempt to pad using a 3D array equivalent
    bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
    assert_raises(ValueError, pad, arr, bad_shape, mode='constant')

def test_ada_sample_wt_fit():
    """Test that an error is raised when sample is called before fitting."""
    # Create the object
    ada = ADASYN(random_state=RND_SEED)
    assert_raises(RuntimeError, ada.sample, X, Y)

def test_ada_wrong_nn_obj():
    """Test that an error is raised when a wrong NN object is passed."""
    # Resample the data
    nn = 'rnd'
    ada = ADASYN(random_state=RND_SEED, n_neighbors=nn)
    assert_raises(ValueError, ada.fit_sample, X, Y)

def test_normalize_data():
    sig = np.arange(1, 66)[::-1]
    where_b0 = np.zeros(65, 'bool')
    where_b0[0] = True

    assert_raises(ValueError, normalize_data, sig, where_b0, out=sig)

    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 65.)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[-5:], 5 / 65.)

    where_b0[[0, 1]] = [True, True]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 64.5)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[-5:], 5 / 64.5)

    sig = sig * np.ones((2, 3, 1))
    where_b0[[0, 1]] = [True, False]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 65.)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[..., -5:], 5 / 65.)

    where_b0[[0, 1]] = [True, True]
    norm_sig = normalize_data(sig, where_b0, min_signal=1)
    assert_array_almost_equal(norm_sig, sig / 64.5)
    norm_sig = normalize_data(sig, where_b0, min_signal=5)
    assert_array_almost_equal(norm_sig[..., -5:], 5 / 64.5)

def test_sph_harm_ind_list():
    m_list, n_list = sph_harm_ind_list(8)
    assert_equal(m_list.shape, n_list.shape)
    assert_equal(m_list.shape, (45,))
    assert_true(np.all(np.abs(m_list) <= n_list))
    assert_array_equal(n_list % 2, 0)
    assert_raises(ValueError, sph_harm_ind_list, 1)

def test_SeedCoherenceAnalyzer():
    """ Test the SeedCoherenceAnalyzer """
    methods = (None,
               {"this_method": 'welch', "NFFT": 256},
               {"this_method": 'multi_taper_csd'},
               {"this_method": 'periodogram_csd', "NFFT": 256})

    Fs = np.pi
    t = np.arange(256)
    seed1 = np.sin(10 * t) + np.random.rand(t.shape[-1])
    seed2 = np.sin(10 * t) + np.random.rand(t.shape[-1])
    target = np.sin(10 * t) + np.random.rand(t.shape[-1])
    T = ts.TimeSeries(np.vstack([seed1, target]), sampling_rate=Fs)
    T_seed1 = ts.TimeSeries(seed1, sampling_rate=Fs)
    T_seed2 = ts.TimeSeries(np.vstack([seed1, seed2]), sampling_rate=Fs)
    T_target = ts.TimeSeries(np.vstack([seed1, target]), sampling_rate=Fs)
    for this_method in methods:
        if this_method is None or this_method['this_method'] == 'welch':
            C1 = nta.CoherenceAnalyzer(T, method=this_method)
            C2 = nta.SeedCoherenceAnalyzer(T_seed1, T_target,
                                           method=this_method)
            C3 = nta.SeedCoherenceAnalyzer(T_seed2, T_target,
                                           method=this_method)
            npt.assert_almost_equal(C1.coherence[0, 1], C2.coherence[1])
            npt.assert_almost_equal(C2.coherence[1], C3.coherence[0, 1])
            npt.assert_almost_equal(C1.phase[0, 1], C2.relative_phases[1])
            npt.assert_almost_equal(C1.delay[0, 1], C2.delay[1])
        else:
            npt.assert_raises(ValueError, nta.SeedCoherenceAnalyzer,
                              T_seed1, T_target, this_method)

def test_SparseCoherenceAnalyzer():
    Fs = np.pi
    t = np.arange(256)
    x = np.sin(10 * t) + np.random.rand(t.shape[-1])
    y = np.sin(10 * t) + np.random.rand(t.shape[-1])
    T = ts.TimeSeries(np.vstack([x, y]), sampling_rate=Fs)
    C1 = nta.SparseCoherenceAnalyzer(T, ij=((0, 1), (1, 0)))
    C2 = nta.CoherenceAnalyzer(T)

    # Coherence symmetry:
    npt.assert_equal(np.abs(C1.coherence[0, 1]),
                     np.abs(C1.coherence[1, 0]))
    npt.assert_equal(np.abs(C1.coherency[0, 1]),
                     np.abs(C1.coherency[1, 0]))

    # Make sure you get the same answers as you would from the standard
    # CoherenceAnalyzer:
    npt.assert_almost_equal(C2.coherence[0, 1], C1.coherence[0, 1])
    # This is the PSD (for the first time-series in the object):
    npt.assert_almost_equal(C2.spectrum[0, 0], C1.spectrum[0])
    # And the second (for good measure):
    npt.assert_almost_equal(C2.spectrum[1, 1], C1.spectrum[1])

    # The relative phases should be equal
    npt.assert_almost_equal(C2.phase[0, 1], C1.relative_phases[0, 1])
    # But not the absolute phases (which have the same shape):
    npt.assert_equal(C1.phases[0].shape, C1.relative_phases[0, 1].shape)

    # The delay is equal:
    npt.assert_almost_equal(C2.delay[0, 1], C1.delay[0, 1])

    # Make sure that you would get an error if you provided a method other
    # than 'welch':
    npt.assert_raises(ValueError, nta.SparseCoherenceAnalyzer, T,
                      method=dict(this_method='foo'))

def test_version_2_0_memmap():
    # requires more than 2 bytes for the header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    tf = tempfile.mktemp('', 'mmap', dir=tempdir)

    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.open_memmap, tf, mode='w+',
                  dtype=d.dtype, shape=d.shape, version=(1, 0))

    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(2, 0))
    ma[...] = d
    del ma

    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                                shape=d.shape, version=None)
        assert_(w[0].category is UserWarning)
        ma[...] = d
        del ma

    ma = format.open_memmap(tf, mode='r')
    assert_array_equal(ma, d)

def test_bad_header():
    # header of length less than 2 should fail
    s = BytesIO()
    assert_raises(ValueError, format.read_array_header_1_0, s)
    s = BytesIO(asbytes('1'))
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # header shorter than indicated size should fail
    s = BytesIO(asbytes('\x01\x00'))
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # headers without the exact keys required should fail
    d = {"shape": (1, 2),
         "descr": "x"}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    assert_raises(ValueError, format.read_array_header_1_0, s)

    d = {"shape": (1, 2),
         "fortran_order": False,
         "descr": "x",
         "extrakey": -1}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    assert_raises(ValueError, format.read_array_header_1_0, s)

def test_diags_bad(self):
    a = array([1, 2, 3, 4, 5])
    b = array([6, 7, 8, 9, 10])
    c = array([11, 12, 13, 14, 15])

    cases = []
    cases.append(([a[:0]], 0, (1, 1)))
    cases.append(([a], [0], (1, 1)))
    cases.append(([a[:3], b], [0, 2], (3, 3)))
    cases.append(([a[:4], b, c[:3]], [-1, 0, 1], (5, 5)))
    cases.append(([a[:2], c, b[:3]], [-4, 2, -1], (6, 5)))
    cases.append(([a[:2], c, b[:3]], [-4, 2, -1], None))
    cases.append(([], [-4, 2, -1], None))
    cases.append(([1], [-4], (4, 4)))
    cases.append(([a[:0]], [-1], (1, 2)))
    cases.append(([a], 0, None))

    for d, o, shape in cases:
        try:
            assert_raises(ValueError, construct.diags, d, o, shape)
        except Exception:
            print("%r %r %r" % (d, o, shape))
            raise

    assert_raises(TypeError, construct.diags, [[None]], [0])

def test_random_sampling(self):
    # Simple sanity checks for sparse random sampling.
    for f in sprand, _sprandn:
        for t in [np.float32, np.float64, np.longdouble]:
            x = f(5, 10, density=0.1, dtype=t)
            assert_equal(x.dtype, t)
            assert_equal(x.shape, (5, 10))
            assert_equal(x.nonzero()[0].size, 5)

        x1 = f(5, 10, density=0.1, random_state=4321)
        assert_equal(x1.dtype, np.double)

        x2 = f(5, 10, density=0.1,
               random_state=np.random.RandomState(4321))

        assert_array_equal(x1.data, x2.data)
        assert_array_equal(x1.row, x2.row)
        assert_array_equal(x1.col, x2.col)

        for density in [0.0, 0.1, 0.5, 1.0]:
            x = f(5, 10, density=density)
            assert_equal(x.nnz, int(density * np.prod(x.shape)))

        for fmt in ['coo', 'csc', 'csr', 'lil']:
            x = f(5, 10, format=fmt)
            assert_equal(x.format, fmt)

        assert_raises(ValueError, lambda: f(5, 10, 1.1))
        assert_raises(ValueError, lambda: f(5, 10, -0.1))

def test_config_parser():
    _environ = dict(os.environ)
    os.environ['BLOCKS_CONFIG'] = os.path.join(os.getcwd(), '.test_blocksrc')
    with open(os.environ['BLOCKS_CONFIG'], 'w') as f:
        f.write('data_path: yaml_path')
    if 'BLOCKS_DATA_PATH' in os.environ:
        del os.environ['BLOCKS_DATA_PATH']
    try:
        config = Configuration()
        config.add_config('data_path', str, env_var='BLOCKS_DATA_PATH')
        config.add_config('config_with_default', int, default='1',
                          env_var='BLOCKS_CONFIG_TEST')
        config.add_config('config_without_default', str)
        assert config.data_path == 'yaml_path'
        os.environ['BLOCKS_DATA_PATH'] = 'env_path'
        assert config.data_path == 'env_path'
        assert config.config_with_default == 1
        os.environ['BLOCKS_CONFIG_TEST'] = '2'
        assert config.config_with_default == 2
        assert_raises(ConfigurationError, getattr, config,
                      'non_existing_config')
        assert_raises(ConfigurationError, getattr, config,
                      'config_without_default')
    finally:
        os.remove(os.environ['BLOCKS_CONFIG'])
        os.environ.clear()
        os.environ.update(_environ)

def test_invalid_array(self):
    # seed must be an unsigned 32 bit integer
    assert_raises(TypeError, np.random.RandomState, [-0.5])
    assert_raises(ValueError, np.random.RandomState, [-1])
    assert_raises(ValueError, np.random.RandomState, [4294967296])
    assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
    assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])

def test_plot_sparse_source_estimates():
    """Test plotting of (sparse) source estimates."""
    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
                                            'bem', 'sample-oct-6-src.fif'))

    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    colormap = mne_analyze_colormap(format='matplotlib')
    # don't really need to test matplotlib method since it's not used now...
    colormap = mne_analyze_colormap()
    plot_source_estimates(stc, 'sample', colormap=colormap,
                          config_opts={'background': (1, 1, 0)},
                          subjects_dir=subjects_dir, colorbar=True)
    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
                  figure='foo', hemi='both')

    # now do sparse version
    vertices = sample_src[0]['vertno']
    n_verts = len(vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    stc_data.shape = (n_verts, n_time)
    inds = np.where(np.any(stc_data, axis=1))[0]
    stc_data = stc_data[inds]
    # use the builtin int here; np.int is a deprecated alias
    vertices = [vertices[inds], np.empty(0, dtype=int)]
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
                                 opacity=0.5, high_resolution=True)

def test_set_item_index_error(self):
    code = """
           py::tuple a(3);
           a[4] = 1;
           return_val = a;
           """
    assert_raises(IndexError, inline_tools.inline, code)

def test_rag_error():
    img = np.zeros((10, 10, 3), dtype='uint8')
    labels = np.zeros((10, 10), dtype='uint8')
    labels[:5, :] = 0
    labels[5:, :] = 1
    testing.assert_raises(ValueError, graph.rag_mean_color, img, labels,
                          2, 'non existent mode')

def test_neighbors_badargs():
    """Test bad argument values: these should all raise ValueErrors"""
    assert_raises(ValueError, neighbors.NearestNeighbors, algorithm='blah')

    X = rng.random_sample((10, 2))

    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        assert_raises(ValueError, cls, weights='blah')
        nbrs = cls()
        assert_raises(ValueError, nbrs.predict, X)

    nbrs = neighbors.NearestNeighbors().fit(X)
    assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
    assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')

def test_roll_evals():
    # Just making sure this never passes through
    weird_evals = np.array([1, 0.5])
    npt.assert_raises(ValueError, dti._roll_evals, weird_evals)

def test_representation():
    # Test an invalid number of states
    def zero_kstates():
        mod = Representation(1, 0)
    assert_raises(ValueError, zero_kstates)

    # Test an invalid endogenous array
    def empty_endog():
        endog = np.zeros((0, 0))
        mod = Representation(endog, k_states=2)
    assert_raises(ValueError, empty_endog)

    # Test a Fortran-ordered endogenous array (which will be assumed to be in
    # wide format: k_endog x nobs)
    nobs = 10
    k_endog = 2
    endog = np.asfortranarray(
        np.arange(nobs * k_endog).reshape(k_endog, nobs) * 1.)
    mod = Representation(endog, k_states=2)
    assert_equal(mod.nobs, nobs)
    assert_equal(mod.k_endog, k_endog)

    # Test a C-ordered endogenous array (which will be assumed to be in
    # tall format: nobs x k_endog)
    nobs = 10
    k_endog = 2
    endog = np.arange(nobs * k_endog).reshape(nobs, k_endog) * 1.
    mod = Representation(endog, k_states=2)
    assert_equal(mod.nobs, nobs)
    assert_equal(mod.k_endog, k_endog)

    # Test getting the statespace representation
    assert_equal(mod._statespace, None)
    mod._initialize_representation()
    assert(mod._statespace is not None)

def test_fetch_data():
    symmetric362 = SPHERE_FILES['symmetric362']
    with TemporaryDirectory() as tmpdir:
        md5 = fetcher._get_file_md5(symmetric362)
        bad_md5 = '8' * len(md5)

        newfile = path.join(tmpdir, "testfile.txt")
        # Test that the fetcher can get a file
        testfile_url = pathname2url(symmetric362)
        testfile_url = urljoin("file:", testfile_url)
        files = {"testfile.txt": (testfile_url, md5)}
        fetcher.fetch_data(files, tmpdir)
        npt.assert_(path.exists(newfile))

        # Test that the file is replaced when the md5 doesn't match
        with open(newfile, 'a') as f:
            f.write("some junk")
        fetcher.fetch_data(files, tmpdir)
        npt.assert_(path.exists(newfile))
        npt.assert_equal(fetcher._get_file_md5(newfile), md5)

        # Test that an error is raised when the md5 checksum of the download
        # file does not match the expected value
        files = {"testfile.txt": (testfile_url, bad_md5)}
        npt.assert_raises(fetcher.FetcherError,
                          fetcher.fetch_data, files, tmpdir)

def test_roots_hermite():
    rootf = sc.roots_hermite
    evalf = orth.eval_hermite
    weightf = orth.hermite(5).weight_func

    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)

    # Golub-Welsch branch
    x, w = sc.roots_hermite(5, False)
    y, v, m = sc.roots_hermite(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Asymptotic branch (switch over at n >= 150)
    x, w = sc.roots_hermite(200, False)
    y, v, m = sc.roots_hermite(200, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    assert_allclose(sum(v), m, 1e-14, 1e-14)

    assert_raises(ValueError, sc.roots_hermite, 0)
    assert_raises(ValueError, sc.roots_hermite, 3.3)

def test_GradientTable():
    gradients = np.array([[0, 0, 0],
                          [1, 0, 0],
                          [0, 0, 1],
                          [3, 4, 0],
                          [5, 0, 12]], 'float')

    expected_bvals = np.array([0, 1, 1, 5, 13])
    expected_b0s_mask = expected_bvals == 0
    expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]

    gt = GradientTable(gradients, b0_threshold=0)
    npt.assert_array_almost_equal(gt.bvals, expected_bvals)
    npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
    npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
    npt.assert_array_almost_equal(gt.gradients, gradients)

    gt = GradientTable(gradients, b0_threshold=1)
    npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
    npt.assert_array_equal(gt.bvals, expected_bvals)
    npt.assert_array_equal(gt.bvecs, expected_bvecs)

    npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))
    npt.assert_raises(ValueError, GradientTable, np.ones((6,)))

def test_index_no_floats(self):
    a = np.array([[[5]]])

    assert_raises(IndexError, lambda: a[0.0])
    assert_raises(IndexError, lambda: a[0, 0.0])
    assert_raises(IndexError, lambda: a[0.0, 0])
    assert_raises(IndexError, lambda: a[0.0, :])
    assert_raises(IndexError, lambda: a[:, 0.0])
    assert_raises(IndexError, lambda: a[:, 0.0, :])
    assert_raises(IndexError, lambda: a[0.0, :, :])
    assert_raises(IndexError, lambda: a[0, 0, 0.0])
    assert_raises(IndexError, lambda: a[0.0, 0, 0])
    assert_raises(IndexError, lambda: a[0, 0.0, 0])
    assert_raises(IndexError, lambda: a[-1.4])
    assert_raises(IndexError, lambda: a[0, -1.4])
    assert_raises(IndexError, lambda: a[-1.4, 0])
    assert_raises(IndexError, lambda: a[-1.4, :])
    assert_raises(IndexError, lambda: a[:, -1.4])
    assert_raises(IndexError, lambda: a[:, -1.4, :])
    assert_raises(IndexError, lambda: a[-1.4, :, :])
    assert_raises(IndexError, lambda: a[0, 0, -1.4])
    assert_raises(IndexError, lambda: a[-1.4, 0, 0])
    assert_raises(IndexError, lambda: a[0, -1.4, 0])
    assert_raises(IndexError, lambda: a[0.0:, 0.0])
    assert_raises(IndexError, lambda: a[0.0:, 0.0, :])

def test_otsu_one_color_image():
    img = np.ones((10, 10), dtype=np.uint8)
    assert_raises(ValueError, threshold_otsu, img)

def test_invalid_mlags():
    data = dt.data.double_well_discrete().dtraj
    est = dt.markov.msm.MaximumLikelihoodMSM()
    est.fit(data, lagtime=1)

    with assert_raises(ValueError):
        est.chapman_kolmogorov_validator(2, mlags=[0, 1, -10])

def test_index_no_array_to_index(self):
    # No non-scalar arrays.
    a = np.array([[[1]]])
    assert_raises(TypeError, lambda: a[a:a:a])

def test_slicing_no_floats(self):
    a = np.array([[5]])

    # start as float.
    assert_raises(TypeError, lambda: a[0.0:])
    assert_raises(TypeError, lambda: a[0:, 0.0:2])
    assert_raises(TypeError, lambda: a[0.0::2, :0])
    assert_raises(TypeError, lambda: a[0.0:1:2, :])
    assert_raises(TypeError, lambda: a[:, 0.0:])
    # stop as float.
    assert_raises(TypeError, lambda: a[:0.0])
    assert_raises(TypeError, lambda: a[:0, 1:2.0])
    assert_raises(TypeError, lambda: a[:0.0:2, :0])
    assert_raises(TypeError, lambda: a[:0.0, :])
    assert_raises(TypeError, lambda: a[:, 0:4.0:2])
    # step as float.
    assert_raises(TypeError, lambda: a[::1.0])
    assert_raises(TypeError, lambda: a[0:, :2:2.0])
    assert_raises(TypeError, lambda: a[1::4.0, :0])
    assert_raises(TypeError, lambda: a[::5.0, :])
    assert_raises(TypeError, lambda: a[:, 0:4:2.0])
    # mixed.
    assert_raises(TypeError, lambda: a[1.0:2:2.0])
    assert_raises(TypeError, lambda: a[1.0::2.0])
    assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
    assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
    assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :])
    assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
    # should still get the TypeError if step = 0.
    assert_raises(TypeError, lambda: a[::0.0])

def test_broaderrors_indexing(self):
    a = np.zeros((5, 5))
    assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
    assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)

def test_too_many_fancy_indices_special_case(self):
    # Just documents behaviour, this is a small limitation.
    a = np.ones((1,) * 32)  # 32 is NPY_MAXDIMS
    assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)

def test_basic(self):
    a = np.arange(10)
    assert_raises(IndexError, lambda: a[..., ...])
    assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
    assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))

def test_reduce_axis_float_index(self):
    d = np.zeros((3, 3, 3))
    assert_raises(TypeError, np.min, d, 0.5)
    assert_raises(TypeError, np.min, d, (0.5, 1))
    assert_raises(TypeError, np.min, d, (1, 2.2))
    assert_raises(TypeError, np.min, d, (.2, 1.2))

def test_setitem(self):
    assign = functools.partial(array_indexing, 1)

    # Deletion is impossible:
    assert_raises(ValueError, assign, np.ones(10), 0)
    # 0-d arrays don't work:
    assert_raises(IndexError, assign, np.ones(()), 0, 0)
    # Out of bound values:
    assert_raises(IndexError, assign, np.ones(10), 11, 0)
    assert_raises(IndexError, assign, np.ones(10), -11, 0)
    assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
    assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)

    a = np.arange(10)
    assign(a, 4, 10)
    assert_(a[4] == 10)

    a = a.reshape(5, 2)
    assign(a, 4, 10)
    assert_array_equal(a[-1], [10, 10])

def test_octave_error(self):
    npt.assert_raises(octavemagic.OctaveMagicError, self.ip.run_cell_magic,
                      'octave', '', 'a = ones2(1)')

def test_boolean_indexing_weirdness(self):
    # Weird boolean indexing things; without the assert_ wrappers the
    # comparisons would be evaluated and silently discarded
    a = np.ones((2, 3, 4))
    assert_(a[False, True, ...].shape == (0, 2, 3, 4))
    assert_(a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2))
    assert_raises(IndexError, lambda: a[False, [0, 1], ...])

def test_non_unique_vocab():
    vocab = ['a', 'b', 'c', 'a', 'a']
    vect = CountVectorizer(vocabulary=vocab)
    assert_raises(ValueError, vect.fit, [])

def test_precision_prior_wrong_nb():
    with assert_raises(ValueError):
        m = tm.THMM(n_unique=2)
        m.precision_prior_ = np.array([0.7, 0.8, 0.9])

def test_duplicate_keys(self):
    a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
    b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
    assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)

def test_exceptions(self):
    assert_raises(ValueError, triu_indices_from, np.ones((2,)))
    assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))

def test_switched_arguments():
    image = np.ones((5, 5))
    template = np.ones((3, 3))
    assert_raises(ValueError, match_template, template, image)

def test_vectorizer():
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1

    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)

    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)

    # compare that the two vectorizers give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()

        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)

        # stop word from the fixed list
        assert_false("the" in vocabulary)

        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words or extraction
        # artifacts)
        assert_false("copyright" in vocabulary)

        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)

    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))

    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))

    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)

    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)

    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)

    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)

    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')

    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)

    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)

    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)

    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)

    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)

    # error with bad analyzer type (call set_params rather than
    # overwriting the bound method with a string)
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)

def test_request_unknown_dimension():
    brick = TestBrick()
    assert_raises(ValueError, brick.get_dim, 'unknown')

def test_may_share_memory_bad_max_work():
    x = np.zeros([1])
    assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100)
    assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100)

def test_transform_error(self, X, y, expected):
    enc = TargetEncoder(cols=['cat'], handle_unseen='error')
    X = pd.DataFrame(X, columns=['cat'])
    enc.fit(X, pd.Series(y))
    X.iloc[0, 0] = 'foo'
    assert_raises(ValueError, enc.transform, X)

def test_apply():
    brick = TestBrick(0)
    assert TestBrick.apply(brick, [0]) == [0, 1]
    if six.PY2:
        assert_raises(TypeError, TestBrick.apply, [0])

def test_init_wrong_input(self, handle_unseen):
    assert_raises(ValueError, TargetEncoder, None, handle_unseen)

def test_apply_not_child():
    child = TestBrick()
    parent = ParentBrick(child)
    parent.children = []
    assert_raises(ValueError, parent.apply, tensor.matrix())

def test_error_lowsamples(self):
    # raises error if samples are low (< 3)
    x = np.arange(3)
    y = np.arange(3)
    assert_raises(ValueError, _CheckInputs(x, y))

def test_transform_before_fit(self):
    enc = TargetEncoder()
    assert_raises(ValueError, enc.transform, 1)

def test_error_notndarray(self):
    # raises error if x or y is not a ndarray
    x = np.arange(20)
    y = [5] * 20
    assert_raises(TypeError, _CheckInputs(x, y))
    assert_raises(TypeError, _CheckInputs(y, x))

def test_error_reps(self, reps):
    # raises error if reps is negative
    x = np.arange(20)
    assert_raises(ValueError, _CheckInputs(x, x, reps=reps))

def test_pairwise_distances_no_broadcast(self):
    assert_allclose(pairwise_distances_no_broadcast(self.X, self.Y),
                    [1.41421356, 2.23606798, 4.58257569, 4.12310563])

    with assert_raises(ValueError):
        pairwise_distances_no_broadcast([1, 2, 3], [6])

def test_error_shape(self):
    # raises error if the number of samples (n) differs between inputs
    x = np.arange(100).reshape(25, 4)
    y = x.reshape(10, 10)
    assert_raises(ValueError, _CheckInputs(x, y))