def test_with_data_raises_exception_in_teardown(self):
    self.fxt.loader = AbusiveMockLoader()

    @self.fxt.with_data(StubDataset1, StubDataset2)
    def some_callable(data):
        pass

    raises(ValueError)(some_callable)()
    eq_(mock_call_log[0], (AbusiveMockLoader, 'load', StubSuperSet))
def test_cqt():
    sr = 11025
    duration = 5.0

    y = make_signal(sr, duration)

    # incorrect hop length for a 6-octave analysis
    # num_octaves = 6, 2**(6-1) = 32 > 16
    for hop_length in [-1, 0, 16, 63, 65]:
        yield (raises(librosa.ParameterError)(__test_cqt_size), y, sr,
               hop_length, None, 72, 12, 0.0, 2, None, 1, 0.01)

    # Filters go beyond Nyquist. 500 Hz -> 4 octaves = 8000 Hz > 11000 Hz
    yield (raises(librosa.ParameterError)(__test_cqt_size), y, sr, 512, 500,
           4 * 12, 12, 0.0, 2, None, 1, 0.01)

    # Test with fmin near Nyquist
    for fmin in [3000, 4800]:
        for n_bins in [1, 2]:
            for bins_per_octave in [12]:
                yield (__test_cqt_size, y, sr, 512, fmin, n_bins,
                       bins_per_octave, 0.0, 2, None, 1, 0.01)

    # Test for no errors and correct output size
    for fmin in [None, librosa.note_to_hz('C2')]:
        for n_bins in [1, 12, 24, 48, 72, 74, 76]:
            for bins_per_octave in [12, 24]:
                for tuning in [None, 0, 0.25]:
                    for resolution in [1, 2]:
                        for norm in [1, 2]:
                            yield (__test_cqt_size, y, sr, 512, fmin, n_bins,
                                   bins_per_octave, tuning, resolution,
                                   None, norm, 0.01)
def test_untrusted_checkers():
    def _test(checker, callback, sandboxed=True):
        with TemporaryCwd():
            upload_files()
            checker_bin = compile(checker, '/chk.e')['out_file']
        with TemporaryCwd():
            executor = SupervisedExecutor(use_program_return_code=True) if \
                sandboxed else DetailedUnprotectedExecutor()
            renv = compile_and_run('/add_print.c', {
                'in_file': '/input',
                'check_output': True,
                'hint_file': '/hint',
                'chk_file': checker_bin,
                'untrusted_checker': True,
            }, executor, use_sandboxes=sandboxed)
            print_env(renv)
            if callback:
                callback(renv)

    def ok_42(env):
        res_ok(env)
        eq_(42, int(env['result_percentage']))

    # Test if unprotected execution allows for return code 1
    yield _test, '/chk-rtn1.c', None, False
    # Test if unprotected execution allows for return code 2
    yield raises(SystemError)(_test), '/chk-rtn2.c', None, False

    if ENABLE_SANDBOXES:
        yield _test, '/chk.c', ok_42
        # Broken checker
        yield _test, '/open2.c', res_wa
        # Wrong model solution
        yield raises(SystemError)(_test), '/chk-rtn2.c', None
def test_MGLSADF():
    from pysptk.synthesis import MGLSADF

    def __test_synthesis(filt):
        # dummy source excitation
        source = __dummy_source()

        hopsize = 80

        # dummy filter coef.
        windowed = __dummy_windowed_frames(
            source, frame_len=512, hopsize=hopsize)
        gamma = -1.0 / filt.stage
        mgc = np.apply_along_axis(pysptk.mgcep, 1, windowed,
                                  filt.order, filt.alpha, gamma)
        b = np.apply_along_axis(pysptk.mgc2b, 1, mgc, filt.alpha, gamma)

        # synthesis
        synthesizer = Synthesizer(filt, hopsize)
        y = synthesizer.synthesis(source, b)
        assert np.all(np.isfinite(y))

    def __test(order, alpha, stage):
        __test_synthesis(MGLSADF(order, alpha, stage))

    for order in [20, 25]:
        for alpha in [0.0, 0.41]:
            for stage in [2, 5, 10]:
                yield __test, order, alpha, stage

    def __test_invalid_stage(stage):
        MGLSADF(20, stage=stage)

    yield raises(ValueError)(__test_invalid_stage), -1
    yield raises(ValueError)(__test_invalid_stage), 0
def test_trans_cycle():
    def __trans(n, p):
        A = librosa.sequence.transition_cycle(n, p)

        # Right shape
        assert A.shape == (n, n)
        # diag is correct
        assert np.allclose(np.diag(A), p)

        for i in range(n):
            assert A[i, np.mod(i + 1, n)] == 1 - A[i, i]

        # we have well-formed distributions
        assert np.all(A >= 0)
        assert np.allclose(A.sum(axis=1), 1)

    # Test with constant self-loops
    for n in range(2, 4):
        yield __trans, n, 0.5

    # Test with variable self-loops
    yield __trans, 3, [0.8, 0.7, 0.5]

    # Failure if we don't have enough states
    yield raises(librosa.ParameterError)(__trans), 1, 0.5

    # Failure if n_states is wrong
    yield raises(librosa.ParameterError)(__trans), None, 0.5

    # Failure if p is not a probability
    yield raises(librosa.ParameterError)(__trans), 3, 1.5
    yield raises(librosa.ParameterError)(__trans), 3, -0.25

    # Failure if there's a shape mismatch
    yield raises(librosa.ParameterError)(__trans), 3, [0.5, 0.2]
def test_raises(self):
    from nose.case import FunctionTestCase

    def raise_typeerror():
        raise TypeError("foo")

    def noraise():
        pass

    raise_good = raises(TypeError)(raise_typeerror)
    raise_other = raises(ValueError)(raise_typeerror)
    no_raise = raises(TypeError)(noraise)

    tc = FunctionTestCase(raise_good)
    self.assertEqual(str(tc), "%s.%s" % (__name__, 'raise_typeerror'))

    raise_good()
    try:
        raise_other()
    except TypeError as e:
        pass
    else:
        self.fail("raises did not pass through unwanted exception")
    try:
        no_raise()
    except AssertionError as e:
        pass
    else:
        self.fail("raises did not raise assertion error on no exception")
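# Note: a minimal usage sketch of nose.tools.raises (names below are
# illustrative, not from the suite above). `raises(E)` is a decorator
# factory: the wrapper passes only if the wrapped callable raises E,
# re-raises any other exception, and raises AssertionError if no
# exception occurs at all.
from nose.tools import raises

@raises(ZeroDivisionError)
def test_divide_by_zero():
    # passes, because the expected exception is raised
    1 / 0

def divide(a, b):
    return a / b

# wrap-and-call form, as used throughout the tests above:
# build the checking wrapper, then invoke it immediately
raises(ZeroDivisionError)(divide)(1, 0)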
def test_eula():
    "Check step name, license title."
    xio.fauxStdin = ['']
    xio.fauxStdout = []
    inputCount = len(xio.fauxStdin)
    eula = eula_ui.EulaWindow()
    raises(SystemExit)(eula.run)()
    sessionOutput = ''.join(xio.fauxStdout)

    # step name check
    assert xio.fauxStdout[0].find(
        'End User License Agreement'.ljust(64, '-')) >= 0

    # license title check
    found = False
    for title in _LicenseTitles:
        if title in sessionOutput:
            found = True
            print("Found license:", title)
            break
    assert found

    # screen-count check -- not quite enough for a separate step
    screenCount = 0
    for text in xio.fauxStdout:
        if text.find('--------\n') >= 0:
            screenCount += 1
    assert screenCount == inputCount + 1
def check_arrayaccess(clibname, list_num, list_cdt, cdt, dim,
                      _calloc_=None, carrtype=None):
    """Check C side array access"""
    if cdt in ['char', 'short', 'ushort', 'int', 'uint', 'long', 'ulong',
               'longlong', 'ulonglong', 'bool', 'size_t']:
        ass_eq = assert_equal
    elif cdt in ['float', 'double', 'longdouble']:
        ass_eq = assert_almost_equal

    ArrayAccess = gene_class_ArrayAccess(
        clibname, len(list_num), list_cdt, carrtype)
    num_dict = dict(zip(ArrayAccess.num_names, list_num))  # {num_i: 6, ...}
    if _calloc_ is not None:
        num_dict.update(_calloc_=_calloc_)
    aa = ArrayAccess(**num_dict)
    aa.fill()

    # arr_via_ret should return same array (garr)
    garr = aa.arr_via_ret(cdt, dim)
    arr = aa.arr(cdt, dim)
    ass_eq(garr, arr)

    # insert completely different value to 'arr'
    if cdt == 'char':
        arr.flat = alpharange(100, numpy.prod(arr.shape) + 100)
    elif cdt == 'bool':
        arr[:] = -arr
    else:
        arr += 100
    raises(AssertionError)(assert_equal)(garr, arr)

    # get array (garr2) via arr_via_ret again
    garr2 = aa.arr_via_ret(cdt, dim)
    assert_equal(garr2, arr)
def test_melody_invalid():
    f1 = np.linspace(110.0, 440.0, 10)
    v1 = np.sign(np.random.randn(len(f1)))
    v2 = np.sign(np.random.randn(len(f1)))

    ref_ann = create_annotation(values=f1 * v1,
                                confidence=1.0,
                                duration=0.01,
                                namespace='pitch_hz')

    est_ann = create_annotation(values=f1 * v2,
                                confidence=1.0,
                                duration=0.01,
                                namespace='pitch_midi')

    yield raises(jams.NamespaceError)(jams.eval.melody), ref_ann, est_ann
    yield raises(jams.NamespaceError)(jams.eval.melody), est_ann, ref_ann

    est_ann = create_annotation(values=['a', 'b', 'c'],
                                confidence=1.0,
                                duration=0.01,
                                namespace='pitch_hz')

    yield raises(jams.SchemaError)(jams.eval.melody), ref_ann, est_ann
    yield raises(jams.SchemaError)(jams.eval.melody), est_ann, ref_ann
def test_files():
    # Expected output
    output = [
        os.path.join(os.path.abspath(os.path.curdir), "data", s)
        for s in ["test1_22050.wav", "test1_44100.wav", "test2_8000.wav"]
    ]

    def __test(searchdir, ext, recurse, case_sensitive, limit, offset):
        files = librosa.util.find_files(
            searchdir,
            ext=ext,
            recurse=recurse,
            case_sensitive=case_sensitive,
            limit=limit,
            offset=offset,
        )

        s1 = slice(offset, None)
        s2 = slice(limit)

        assert set(files) == set(output[s1][s2])

    for searchdir in [os.path.curdir, os.path.join(os.path.curdir, "data")]:
        for ext in [None, "wav", "WAV", ["wav"], ["WAV"]]:
            for recurse in [False, True]:
                for case_sensitive in [False, True]:
                    for limit in [None, 1, 2]:
                        for offset in [0, 1, -1]:
                            tf = __test

                            if searchdir == os.path.curdir and not recurse:
                                tf = raises(AssertionError)(__test)

                            if (ext is not None and case_sensitive and
                                    (ext == "WAV" or
                                     set(ext) == set(["WAV"]))):
                                tf = raises(AssertionError)(__test)

                            yield (tf, searchdir, ext, recurse,
                                   case_sensitive, limit, offset)
def test_tone():
    def __test(frequency, sr, length, duration, phi):
        y = librosa.tone(frequency=frequency,
                         sr=sr,
                         length=length,
                         duration=duration,
                         phi=phi)

        if length is not None:
            assert len(y) == length
        else:
            assert len(y) == np.ceil(duration * sr)

    # Bad cases
    yield raises(librosa.ParameterError)(__test), None, 22050, 22050, 1, None
    yield raises(librosa.ParameterError)(__test), 440, 22050, None, None, np.pi

    for sr in [11025, 22050]:
        for length in [None, 22050]:
            for duration in [None, 0.5]:
                for phi in [None, np.pi]:
                    if length is not None or duration is not None:
                        yield __test, 440, sr, length, duration, phi
def test_chirp():
    def __test(fmin, fmax, sr, length, duration, linear, phi):
        y = librosa.chirp(fmin=fmin,
                          fmax=fmax,
                          sr=sr,
                          length=length,
                          duration=duration,
                          linear=linear,
                          phi=phi)

        if length is not None:
            assert len(y) == length
        else:
            assert len(y) == np.ceil(duration * sr)

    # Bad cases
    yield raises(librosa.ParameterError)(__test), None, None, 22050, 22050, 1, False, None
    yield raises(librosa.ParameterError)(__test), 440, None, 22050, 22050, 1, False, None
    yield raises(librosa.ParameterError)(__test), None, 880, 22050, 22050, 1, False, None
    yield raises(librosa.ParameterError)(__test), 440, 880, 22050, None, None, False, None

    for sr in [11025, 22050]:
        for length in [None, 11025]:
            for duration in [None, 0.5]:
                for phi in [None, np.pi / 2]:
                    if length is not None or duration is not None:
                        yield __test, 440, 880, sr, length, duration, False, phi
                        yield __test, 880, 440, sr, length, duration, True, phi
def test_init_wo_num(self):
    """
    SimObject.__init__ should raise ValueError if num_* are not specified.
    """
    raises(ValueError)(self.check_init_wo_num)()
    self.check_init_wo_num(num_i=0)
    self.check_init_wo_num(num_i=1)
def test_cmemsubsets_default(self):
    vc = self.new(self.VectCalcCMemSubSet)
    eq_(vc._cmemsubsets_parsed_.getall(), dict(vec=False, dot=True))
    vc.subvec_dot()
    raises(ValueError)(vc.vec)()
    vc.getv('v1, v2')
    raises(KeyError)(vc.getv)('v3')
def test_load_fail():
    # 1. test bad file path
    # 2. test non-json file
    # 3. test bad extensions
    # 4. test bad codecs

    def __test(filename, fmt):
        jams.load(filename, fmt=fmt)

    # Make a non-existent file
    tdir = tempfile.mkdtemp()
    yield raises(IOError)(__test), os.path.join(tdir, 'nonexistent.jams'), 'jams'
    os.rmdir(tdir)

    # Make a non-json file
    tdir = tempfile.mkdtemp()
    badfile = os.path.join(tdir, 'nonexistent.jams')
    with open(badfile, mode='w') as fp:
        fp.write('some garbage')
    yield raises(ValueError)(__test), os.path.join(tdir, 'nonexistent.jams'), 'jams'
    os.unlink(badfile)
    os.rmdir(tdir)

    tdir = tempfile.mkdtemp()
    for ext in ['txt', '']:
        badfile = os.path.join(tdir, 'nonexistent')
        yield raises(jams.ParameterError)(__test), '{:s}.{:s}'.format(badfile, ext), 'auto'
        yield raises(jams.ParameterError)(__test), '{:s}.{:s}'.format(badfile, ext), ext
        yield raises(jams.ParameterError)(__test), '{:s}.jams'.format(badfile), ext

    os.rmdir(tdir)
def test_delta():
    # Note: this test currently only checks first-order differences

    def __test(width, order, axis, x):
        delta = librosa.feature.delta(x, width=width, order=order, axis=axis)

        # Check that trimming matches the expected shape
        eq_(x.shape, delta.shape)

        # Once we're sufficiently far into the signal (ie beyond half_len)
        # (x + delta)[t] should approximate x[t+1] if x is actually linear
        slice_orig = [slice(None)] * x.ndim
        slice_out = [slice(None)] * delta.ndim
        slice_orig[axis] = slice(width // 2 + 1, -width // 2 + 1)
        slice_out[axis] = slice(width // 2, -width // 2)
        assert np.allclose((x + delta)[slice_out], x[slice_orig])

    x = np.vstack([np.arange(100.0)] * 3)

    for width in range(-1, 8):
        for slope in np.linspace(-2, 2, num=6):
            for bias in [-10, 0, 10]:
                for order in [0, 1]:
                    for axis in range(x.ndim):
                        tf = __test
                        if (width < 3 or np.mod(width, 2) != 1
                                or width > x.shape[axis]):
                            tf = raises(librosa.ParameterError)(__test)
                        if order != 1:
                            tf = raises(librosa.ParameterError)(__test)
                        yield tf, width, order, axis, slope * x + bias
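# Note: most snippets here rely on nose's test-generator protocol, as in
# test_delta above: a test function yields (callable, *args) tuples and nose
# runs callable(*args) as a separate case, so swapping in raises(...)(callable)
# flips a case from "must pass" to "must raise". A hedged sketch of that
# idiom, with illustrative names:
from nose.tools import raises

def _check_positive(x):
    assert x > 0

def test_positive_cases():
    for x in [-1, 0, 1, 2]:
        tf = _check_positive
        if x <= 0:
            # expected-failure case: passes iff AssertionError is raised
            tf = raises(AssertionError)(_check_positive)
        yield tf, x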
def test_cqt():
    sr = 11025

    # Impulse train
    y = np.zeros(int(5.0 * sr))
    y[::sr] = 1.0

    # Hop size not long enough for num octaves
    # num_octaves = 6, 2**6 = 64 > 32
    yield (raises(librosa.ParameterError)(__test_cqt_size), y, sr, 32, None,
           72, 12, 0.0, 2, None, 1, 0.01)

    # Filters go beyond Nyquist. 500 Hz -> 4 octaves = 8000 Hz > 11000 Hz
    yield (raises(librosa.ParameterError)(__test_cqt_size), y, sr, 512, 500,
           48, 12, 0.0, 2, None, 1, 0.01)

    # Test with fmin near Nyquist
    for fmin in [3000, 4800]:
        for n_bins in [1, 2]:
            for bins_per_octave in [12]:
                yield (__test_cqt_size, y, sr, 512, fmin, n_bins,
                       bins_per_octave, 0.0, 2, None, 1, 0.01)

    # Test for no errors and correct output size
    for fmin in [None, librosa.note_to_hz('C2')]:
        for n_bins in [1, 12, 24, 48, 72, 74, 76]:
            for bins_per_octave in [12, 24]:
                for tuning in [None, 0, 0.25]:
                    for resolution in [1, 2]:
                        for norm in [1, 2]:
                            yield (__test_cqt_size, y, sr, 512, fmin, n_bins,
                                   bins_per_octave, tuning, resolution,
                                   None, norm, 0.01)
def test_MLSADF():
    def __test_synthesis(filt):
        # dummy source excitation
        source = __dummy_source()

        hopsize = 80

        # dummy filter coef.
        windowed = __dummy_windowed_frames(
            source, frame_len=512, hopsize=hopsize)
        mc = np.apply_along_axis(
            pysptk.mcep, 1, windowed, filt.order, filt.alpha)
        b = np.apply_along_axis(pysptk.mc2b, 1, mc, filt.alpha)

        # synthesis
        synthesizer = Synthesizer(filt, hopsize)
        y = synthesizer.synthesis(source, b)
        assert np.all(np.isfinite(y))

    from pysptk.synthesis import MLSADF

    def __test(order, alpha):
        __test_synthesis(MLSADF(order, alpha))

    for order in [20, 25]:
        for alpha in [0.0, 0.41]:
            yield __test, order, alpha

    def __test_invalid_pade(pd):
        MLSADF(20, pd=pd)

    yield raises(ValueError)(__test_invalid_pade), 3
    yield raises(ValueError)(__test_invalid_pade), 6
def check_query(backend_factory, backend_kwargs={}):
    message = pickle.dumps('some data')
    backend_kwargs.setdefault('type', 380)
    with nested(create_test_client(),
                closing(backend_factory(**backend_kwargs))) \
            as (client, backend):
        with timedcontext(4):
            client.connect(server.server_address, sync=True)
            raises(OperationFailed)(lambda: client.query(
                fields={'to': backend.instance_id,
                        'workflow': 'some workflow'},
                message=message, type=1136, timeout=1))()

            backend.connect(server.server_address, sync=True)
            th = TestThread(target=backend.handle_one)
            th.setDaemon(True)
            th.start()

            response = client.query(
                fields={'to': backend.instance_id,
                        'workflow': 'some workflow'},
                message=message, type=1136, timeout=1)
            eq_(response.message, message)
            eq_(response.from_, backend.instance_id)
            eq_(response.to, client.instance_id)
            eq_(response.workflow, 'some workflow')
            th.join()
def test_with_data_calls_teardown_on_error(self):
    @self.fxt.with_data(StubDataset1, StubDataset2)
    def some_callable(data):
        raise RuntimeError("a very bad thing")

    raises(RuntimeError)(some_callable)()
    eq_(mock_call_log[0], (MockLoader, 'load', StubSuperSet))
    eq_(mock_call_log[1], (MockLoader, 'unload'))
def make_dummy_ungridded_data_single_point(lat=0.0, lon=0.0, value=1.0,
                                           time=None, altitude=None,
                                           pressure=None, mask=None):
    from cis.data_io.Coord import CoordList, Coord
    from cis.data_io.ungridded_data import UngriddedData, Metadata
    import numpy
    from numpy import ma

    x = Coord(numpy.array(lat), Metadata('latitude'), 'x')
    y = Coord(numpy.array(lon), Metadata('longitude'), 'y')

    if (time is not None) + (altitude is not None) + (pressure is not None) > 1:
        # only one auxiliary coordinate is supported at a time
        raise NotImplementedError(
            "Only one of time, altitude and pressure may be set")
    elif time is None and altitude is None and pressure is None:
        coords = CoordList([x, y])
    elif altitude is not None:
        z = Coord(numpy.array(altitude), Metadata('altitude'), 'z')
        coords = CoordList([x, y, z])
    elif time is not None:
        t = Coord(numpy.array(time), Metadata('time'), 't')
        coords = CoordList([x, y, t])
    elif pressure is not None:
        p = Coord(numpy.array(pressure), Metadata('air_pressure'), 'p')
        coords = CoordList([x, y, p])

    data = numpy.array(value)
    if mask:
        data = ma.masked_array(data, mask=mask)
    return UngriddedData(data,
                         Metadata(name='Rain',
                                  standard_name='rainfall_rate',
                                  long_name="Total Rainfall",
                                  units="kg m-2 s-1",
                                  missing_value=-999),
                         coords)
def test_load_value_dict():
    def new_network():
        return tn.SequentialNode(
            "seq",
            [tn.InputNode("i", shape=(10, 100)),
             tn.LinearMappingNode(
                 "lm",
                 output_dim=15,
                 inits=[treeano.inits.NormalWeightInit()])]
        ).network()

    n1 = new_network()
    n2 = new_network()
    fn1 = n1.function(["i"], ["lm"])
    fn2 = n2.function(["i"], ["lm"])
    x = np.random.randn(10, 100).astype(fX)

    def test():
        np.testing.assert_equal(fn1(x), fn2(x))

    # should fail
    nt.raises(AssertionError)(test)()
    # change weights
    canopy.network_utils.load_value_dict(
        n1, canopy.network_utils.to_value_dict(n2))
    # should not fail
    test()
def test_load_value_dict_not_strict_keys():
    n1 = tn.SequentialNode(
        "seq",
        [tn.InputNode("i", shape=(10, 100)),
         tn.LinearMappingNode(
             "lm",
             output_dim=15,
             inits=[treeano.inits.NormalWeightInit()])]
    ).network()
    n2 = tn.InputNode("i", shape=()).network()

    def test1(strict_keys):
        canopy.network_utils.load_value_dict(
            n1, canopy.network_utils.to_value_dict(n2),
            strict_keys=strict_keys)

    def test2(strict_keys):
        canopy.network_utils.load_value_dict(
            n2, canopy.network_utils.to_value_dict(n1),
            strict_keys=strict_keys)

    nt.raises(AssertionError)(test1)(strict_keys=True)
    nt.raises(AssertionError)(test2)(strict_keys=True)
    test1(strict_keys=False)
    test2(strict_keys=False)
def test_network_nanguard():
    class CustomNode(treeano.NodeImpl):
        input_keys = ()

        def compute_output(self, network):
            network.create_vw(
                "default",
                is_shared=True,
                shape=(),
                inits=[],
            )

    network = CustomNode("c").network()
    # build eagerly to share weights
    network.build()

    fn = canopy.handlers.handled_fn(
        network,
        [canopy.handlers.network_nanguard()],
        {},
        {})

    vw = network["c"].get_vw("default")

    for x in [3, 4, 1e9, 9e9, -9e9, 0]:
        vw.variable.set_value(treeano.utils.as_fX(x))
        fn({})

    for x in [np.inf, -np.inf, np.nan, 2e10]:
        vw.variable.set_value(treeano.utils.as_fX(x))
        nt.raises(Exception)(lambda x: fn(x))({})
def test_gcep_invalid_args():
    x = windowed_dummy_data(1024)

    def __test_gamma(gamma):
        pysptk.gcep(x, gamma=gamma)

    yield raises(ValueError)(__test_gamma), 0.1
    yield raises(ValueError)(__test_gamma), -2.1

    def __test_itype(itype=0):
        pysptk.gcep(x, itype=itype)

    yield raises(ValueError)(__test_itype), -1
    yield raises(ValueError)(__test_itype), 5

    def __test_eps(etype=0, eps=0.0):
        pysptk.gcep(x, etype=etype, eps=eps)

    yield raises(ValueError)(__test_eps), 0, -1.0
    yield raises(ValueError)(__test_eps), -1
    yield raises(ValueError)(__test_eps), -3
    yield raises(ValueError)(__test_eps), 1, -1.0
    yield raises(ValueError)(__test_eps), 2, -1.0

    def __test_min_det(min_det):
        pysptk.gcep(x, min_det=min_det)

    yield raises(ValueError)(__test_min_det), -1.0
def test_stack_memory():
    def __test(data, n_steps, delay):
        data_stack = librosa.feature.stack_memory(data,
                                                  n_steps=n_steps,
                                                  delay=delay)

        # If we're one-dimensional, reshape for testing
        if data.ndim == 1:
            data = data.reshape((1, -1))

        d, t = data.shape

        eq_(data_stack.shape[0], n_steps * d)
        eq_(data_stack.shape[1], t)

        for i in range(d):
            for step in range(1, n_steps):
                assert np.allclose(data[i, :-step * delay],
                                   data_stack[step * d + i, step * delay:])

    srand()

    for ndim in [1, 2]:
        data = np.random.randn(*([5] * ndim))

        for n_steps in [-1, 0, 1, 2, 3, 4]:
            for delay in [-1, 0, 1, 2, 4]:
                tf = __test
                if n_steps < 1:
                    tf = raises(librosa.ParameterError)(__test)
                if delay < 1:
                    tf = raises(librosa.ParameterError)(__test)
                yield tf, data, n_steps, delay
def test_match_events_onesided():
    events_from = np.asarray([5, 15, 25])
    events_to = np.asarray([0, 10, 20, 30])

    def __test(left, right, target):
        match = librosa.util.match_events(events_from, events_to,
                                          left=left, right=right)
        assert np.allclose(target, events_to[match])

    yield __test, False, True, [10, 20, 30]
    yield __test, True, False, [0, 10, 20]

    # Make a right-sided fail
    events_from[0] = 40
    yield raises(librosa.ParameterError)(__test), False, True, [10, 20, 30]

    # Make a left-sided fail
    events_from[0] = -1
    yield raises(librosa.ParameterError)(__test), True, False, [10, 20, 30]

    # Make a two-sided fail
    events_from[0] = -1
    yield raises(librosa.ParameterError)(__test), False, False, [10, 20, 30]

    # Make a two-sided success
    events_to[:-1] = events_from
    yield __test, False, False, events_from
def test_tmeasure_fail_span():
    # Does not start at 0
    ref = [[[1, 10]],
           [[1, 5], [5, 10]]]
    ref = [np.asarray(_) for _ in ref]
    yield raises(ValueError)(mir_eval.hierarchy.tmeasure), ref, ref

    # Does not end at the right time
    ref = [[[0, 5]],
           [[0, 5], [5, 6]]]
    ref = [np.asarray(_) for _ in ref]
    yield raises(ValueError)(mir_eval.hierarchy.tmeasure), ref, ref

    # Two annotations of different shape
    ref = [[[0, 10]],
           [[0, 5], [5, 10]]]
    ref = [np.asarray(_) for _ in ref]
    est = [[[0, 15]],
           [[0, 5], [5, 15]]]
    est = [np.asarray(_) for _ in est]
    yield raises(ValueError)(mir_eval.hierarchy.tmeasure), ref, est
def test_deserialize():
    for case in encoded_messages:
        msg = parse_message(MultiplexerMessage, case['encoded'])
        assert dict((field.name, value) for field, value in msg.ListFields()) \
            == case['pythonized'] == dict_message(msg, all_fields=False)

    raises(DecodeError)(lambda: parse_message(VariousFields, ''))()
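# Note: raises also composes with a lambda for one-off negative assertions
# inside an ordinary (non-generator) test, as test_deserialize does above.
# A hedged sketch with an illustrative example:
from nose.tools import raises

def test_int_parse_failure():
    # the wrapper raises AssertionError if the lambda completes normally
    raises(ValueError)(lambda: int('not a number'))()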
def test_ns_tag_msd_tagtraum_cd1():
    def __test(tag, confidence=None):
        ann = Annotation(namespace='tag_msd_tagtraum_cd1')
        ann.append(time=0, duration=1, value=tag, confidence=confidence)
        ann.validate()

    for tag in ['reggae', 'pop/rock', 'rnb', 'jazz', 'vocal', 'new age',
                'latin', 'rap', 'country', 'international', 'blues',
                'electronic', 'folk']:
        yield __test, tag
        yield __test, six.u(tag)
        yield raises(SchemaError)(__test), tag.upper()

    for tag in [23, None]:
        yield raises(SchemaError)(__test), tag

    yield raises(SchemaError)(__test), 'folk', 1.2
    yield raises(SchemaError)(__test), 'folk', -0.1
    yield __test, 'folk', 1.0
    yield __test, 'folk', 0.0
def test_ns_segment_open():
    def __test(label):
        ann = Annotation(namespace='segment_open')
        ann.append(time=0, duration=1, value=label)
        ann.validate()

    for line in ['a segment', six.u('a unicode segment')]:
        yield __test, line

    for line in [23, None]:
        yield raises(SchemaError)(__test), line
def test_ns_lyrics():
    def __test(lyric):
        ann = Annotation(namespace='lyrics')
        ann.append(time=0, duration=1, value=lyric)
        ann.validate()

    for line in ['Check yourself', six.u('before you wreck yourself')]:
        yield __test, line

    for line in [23, None]:
        yield raises(SchemaError)(__test), line
def test_ns_segment_salami_lower():
    def __test(label):
        ann = Annotation(namespace='segment_salami_lower')
        ann.append(time=0, duration=1, value=label)
        ann.validate()

    for line in ['a', "a'", "a'''", "silence", "Silence", six.u('a')]:
        yield __test, line

    for line in [23, None, 'A', 'S', 'a23', ' Silence 23']:
        yield raises(SchemaError)(__test), line
def test_mgclsp2sp():
    def __test(order, alpha, gamma, fftlen):
        np.random.seed(98765)
        src = np.random.rand(order + 1)
        dst = pysptk.mgclsp2sp(src, alpha, gamma, fftlen)
        assert len(dst) == (fftlen >> 1) + 1
        assert np.all(np.isfinite(dst))

    # TODO: warn("Inf/-Inf will happen when gamma = 0.0")
    for order in [15, 20, 25, 30]:
        for alpha in [0.35, 0.41, 0.5]:
            for gamma in [-1.0, -0.5]:
                for fftlen in [256, 512, 1024]:
                    yield __test, order, alpha, gamma, fftlen

    # invalid gamma
    yield raises(ValueError)(__test), 20, 0.0, 0.1, 256

    # invalid fftlen
    yield raises(ValueError)(__test), 20, 0.0, -0.1, 255
    yield raises(ValueError)(__test), 20, 0.0, -0.1, 257
def test_ns_key_mode():
    def __test(keymode):
        ann = Annotation(namespace='key_mode')
        ann.append(time=0, duration=0, value=keymode, confidence=None)
        ann.validate()

    for val in ['B#:locrian', six.u('A:minor'), 'N', 'E']:
        yield __test, val

    for val in ['asdf', 'A&:phrygian', 11, '', ':dorian', None]:
        yield raises(SchemaError)(__test), val
def test_sparsify_rows():
    def __test(n, d, q):
        X = np.random.randn(*([d] * n))**4
        X = np.asarray(X)

        xs = librosa.util.sparsify_rows(X, quantile=q)

        if X.ndim == 1:
            X = X.reshape((1, -1))

        assert np.allclose(xs.shape, X.shape)

        # And make sure that xs matches X on nonzeros
        xsd = np.asarray(xs.todense())

        for i in range(xs.shape[0]):
            assert np.allclose(xsd[i, xs[i].indices], X[i, xs[i].indices])

        # Compute row-wise magnitude marginals
        v_in = np.sum(np.abs(X), axis=-1)
        v_out = np.sum(np.abs(xsd), axis=-1)

        # Ensure that v_out retains 1-q fraction of v_in
        assert np.all(v_out >= (1.0 - q) * v_in)

    for ndim in range(1, 4):
        for d in [1, 5, 10, 100]:
            for q in [-1, 0.0, 0.01, 0.25, 0.5, 0.99, 1.0, 2.0]:
                tf = __test
                if ndim not in [1, 2]:
                    tf = raises(ValueError)(__test)
                if not 0.0 <= q < 1:
                    tf = raises(ValueError)(__test)
                yield tf, ndim, d, q
def test_exception_propagation():
    for raised, expected in [(StopIteration, StopIteration),
                             (CustomException, Exception)]:
        callback = ExtCallback((4, 4), 250, np.int32, exception_class=raised)
        for num_workers in [1, 4]:
            for batch_size in [1, 15, 150]:
                pipe = create_pipe(callback, 'cpu', batch_size,
                                   py_num_workers=num_workers,
                                   py_start_method='spawn', parallel=True)
                yield raises(expected)(build_and_run_pipeline), \
                    pipe, None, raised, expected
def test_batch_pad():
    def tmp(include_batch_pad):
        network = tn.SequentialNode(
            "seq",
            [tn.InputNode("i", shape=(None, 2)),
             tn.ApplyNode("a",
                          fn=(lambda x: x.shape[0].astype(fX) + x),
                          shape_fn=(lambda s: s))]
        ).network()

        handlers = [canopy.handlers.chunk_variables(3, ["i"])]
        if include_batch_pad:
            handlers.insert(0, canopy.handlers.batch_pad(3, ["x"]))

        fn = canopy.handlers.handled_fn(network,
                                        handlers,
                                        {"x": "i"},
                                        {"out": "seq"})
        return fn({"x": np.zeros((16, 2), dtype=fX)})

    nt.raises(AssertionError)(tmp)(False)
    res = tmp(True)
    np.testing.assert_equal(res["out"], np.ones((18, 2), dtype=fX) * 3)
def test_midi_to_note():
    def __test(midi_num, note, octave, cents):
        note_out = librosa.midi_to_note(midi_num, octave=octave, cents=cents)
        eq_(note_out, note)

    midi_num = 24.25

    yield __test, midi_num, 'C', False, False
    yield __test, midi_num, 'C1', True, False
    yield raises(librosa.ParameterError)(__test), midi_num, 'C+25', False, True
    yield __test, midi_num, 'C1+25', True, True
    yield __test, [midi_num], ['C'], False, False
def test_swipe():
    def __test(x, fs, hopsize, otype):
        f0 = pysptk.swipe(x, fs, hopsize, otype=otype)
        assert np.all(np.isfinite(f0))
        if otype == 1:
            assert np.all(f0 >= 0)

    np.random.seed(98765)
    fs = 16000
    x = np.random.rand(16000)

    for hopsize in [40, 80, 160, 320]:
        for otype in [0, 1, 2]:
            yield __test, x, fs, hopsize, otype

    for otype in ["pitch", "f0", "logf0"]:
        yield __test, x, fs, 80, otype

    # unsupported otype
    yield raises(ValueError)(__test), x, fs, 80, -1
    yield raises(ValueError)(__test), x, fs, 80, 3
    yield raises(ValueError)(__test), x, fs, 80, "ff0"
def test_invalid_interval_additions():
    i = Interval((0, False), (1, False))
    vals = [Interval((-1, True), (0, False)),
            Interval((-1, False), (0, False)),
            Interval((1, False), (2, True)),
            Interval((1, False), (2, False)),
            Interval((3, True), (4, False)),
            Interval((3, False), (4, True)),
            Interval((3, False), (4, False))]

    for v in vals:
        yield raises(ValueError)(_add_intervals), i, v
def test_agcep():
    def __test(order, stage):
        x = windowed_dummy_data(64)
        c = np.zeros(order + 1)
        for v in x:
            pysptk.agcep(v, c, stage=stage)
            assert np.all(np.isfinite(c))

    for order in [20, 22, 25]:
        for stage in six.moves.range(1, 10):
            yield __test, order, stage

    # invalid stage
    yield raises(ValueError)(__test), 20, 0
def test_conv_parse_pad():
    tests = [
        [(3, 4, 5), "full", (2, 3, 4)],
        [(3, 4, 5), "valid", (0, 0, 0)],
        [(3, 5, 7), "same", (1, 2, 3)],
        [(1, 1), "same", (0, 0)],
        [(1, 1), (3, 3), (3, 3)],
    ]
    for filter_size, pad, ans in tests:
        nt.assert_equal(ans, tn.conv.conv_parse_pad(filter_size, pad))

    fails_fn = nt.raises(AssertionError)(tn.conv.conv_parse_pad)
    fails_fn((2,), "same")
    fails_fn((2, 3), (1, 2, 3))
def test_c2acr():
    for src_order in [15, 20, 25, 30]:
        for dst_order in [15, 20, 25, 30]:
            for fftlen in [256, 512, 1024]:
                yield (__test_transform_base, pysptk.b2c, src_order,
                       dst_order, fftlen)

    def __test_fftlen(fftlen):
        pysptk.c2acr(np.ones(20), 19, fftlen)

    for fftlen in [257, 513]:
        yield raises(ValueError)(__test_fftlen), fftlen

    # fftlen too small for the given order
    def __test_small_fftsize():
        __test_fftlen(16)

    yield raises(ValueError)(__test_small_fftsize)
def test_nested_decorators(self):
    from nose.tools import raises, timed, with_setup

    def test():
        pass

    def foo():
        pass

    test = with_setup(foo, foo)(test)
    test = timed(1.0)(test)
    test = raises(TypeError)(test)
    assert test.setup == foo
    assert test.teardown == foo
def test_valid_intervals():
    def __test(intval):
        librosa.util.valid_intervals(intval)

    for d in range(1, 4):
        for n in range(1, 4):
            ivals = np.ones(d * [n])
            for m in range(1, 3):
                slices = [slice(m)] * d
                if m == 2 and d == 2 and n > 1:
                    yield __test, ivals[slices]
                else:
                    yield raises(librosa.ParameterError)(__test), ivals[slices]
def test_empty_dataset():
    class EmptyDataSource(FileDataSource):
        def collect_files(self):
            return []

        def collect_features(self, path):
            pass

    X = FileSourceDataset(EmptyDataSource())

    def __test_outof_range(X):
        print(X[0])

    # Should raise IndexError
    yield raises(IndexError)(__test_outof_range), X
def test_linear_pitchshift():
    def __test(n, lower, upper, jam):
        D = muda.deformers.LinearPitchShift(n_samples=n, lower=lower,
                                            upper=upper)

        jam_orig = deepcopy(jam)

        n_samples = 0
        for jam_new in D.transform(jam):
            # Verify that the original jam reference hasn't changed
            assert jam_new is not jam
            __test_pitch(jam_orig, jam, 0.0, 0.0)

            # Verify that the state and history objects are intact
            __test_deformer_history(D, jam_new.sandbox.muda.history[-1])

            d_state = jam_new.sandbox.muda.history[-1]['state']
            d_tones = d_state['n_semitones']
            tuning = d_state['tuning']
            assert lower <= d_tones <= upper

            __test_pitch(jam_orig, jam_new, d_tones, tuning)
            n_samples += 1

        eq_(n, n_samples)

    for n in [1, 3, 5]:
        for lower in [-3, -1, 0.0]:
            for upper in [1, 3]:
                yield __test, n, lower, upper, jam_fixture

    for bad_samples in [-3, 0]:
        yield raises(ValueError)(__test), bad_samples, -1, 1, jam_fixture

    for bad_int in [(-1, -3), (2, 1)]:
        yield (raises(ValueError)(__test), 3, bad_int[0], bad_int[1],
               jam_fixture)
def test_user_properties():
    created_at = datetime.fromtimestamp(1331764344)
    last_request_at = datetime.fromtimestamp(1331764345)
    last_impression_at = datetime.fromtimestamp(1331764346)
    user = User()
    user.email = '*****@*****.**'
    user.user_id = 1234
    user.name = 'Somebody'
    user.last_seen_ip = '192.168.1.100'
    user.last_seen_user_agent = 'Mozilla/5.0'
    user.last_request_at = last_request_at
    user.last_impression_at = last_impression_at
    user.created_at = created_at
    user.unsubscribed_from_emails = True
    user.custom_data = {'name': 'Ace'}
    user.companies = [{'id': 1, 'name': 'Intercom',
                       'created_at': created_at}]
    try:
        # cannot set the relationship score
        user.relationship_score = 50
    except AttributeError:
        pass
    else:
        raise AssertionError("relationship_score should be read-only")
    eq_(user.email, '*****@*****.**')
    eq_(user.user_id, 1234)
    eq_(user.name, 'Somebody')
    eq_(user.last_seen_ip, '192.168.1.100')
    eq_(user.last_seen_user_agent, 'Mozilla/5.0')
    eq_(user.last_request_at, last_request_at)
    eq_(user.last_impression_at, last_impression_at)
    eq_(user.relationship_score, None)
    eq_(user.created_at, created_at)
    eq_(user.unsubscribed_from_emails, True)
    eq_(user.custom_data['name'], 'Ace')
    eq_(user.session_count, 0)
    raises(AttributeError)(lambda: user.companies)()
def test_recurrence_matrix():
    def __test(n, k, width, sym, metric):
        srand()
        # Make a data matrix
        data = np.random.randn(3, n)

        D = librosa.segment.recurrence_matrix(data, k=k, width=width,
                                              sym=sym, axis=-1,
                                              metric=metric)

        # First test for symmetry
        if sym:
            assert np.allclose(D, D.T)

        # Test for target-axis invariance
        D_trans = librosa.segment.recurrence_matrix(data.T, k=k, width=width,
                                                    sym=sym, axis=0,
                                                    metric=metric)
        assert np.allclose(D, D_trans)

        # If not symmetric, test for correct number of links
        if not sym and k is not None:
            real_k = min(k, n - width)
            assert not np.any(D.sum(axis=1) != real_k)

        # Make sure the +- width diagonal is hollow
        # It's easier to test if zeroing out the triangles leaves nothing
        idx = np.tril_indices(n, k=width)
        D[idx] = False
        D.T[idx] = False
        assert not np.any(D)

    for n in [20, 250]:
        for k in [None, n // 4]:
            for sym in [False, True]:
                for width in [-1, 0, 1, 3, 5]:
                    for metric in ['l2', 'cosine']:
                        tester = __test
                        if width < 1:
                            tester = raises(librosa.ParameterError)(__test)
                        yield tester, n, k, width, sym, metric
def test_beat():
    y, sr = librosa.load(__EXAMPLE_FILE)

    hop_length = 512

    onset_env = librosa.onset.onset_strength(y=y, sr=sr,
                                             hop_length=hop_length)

    def __test(with_audio, with_tempo, start_bpm, bpm, trim, tightness):
        if with_audio:
            _y = y
            _ons = None
        else:
            _y = None
            _ons = onset_env

        tempo, beats = librosa.beat.beat_track(y=_y,
                                               sr=sr,
                                               onset_envelope=_ons,
                                               hop_length=hop_length,
                                               start_bpm=start_bpm,
                                               tightness=tightness,
                                               trim=trim,
                                               bpm=bpm)

        assert tempo >= 0

        if len(beats) > 0:
            assert beats.min() >= 0
            assert beats.max() <= len(onset_env)

    for with_audio in [False, True]:
        for with_tempo in [False, True]:
            for trim in [False, True]:
                for start_bpm in [-20, 0, 60, 120, 240]:
                    for bpm in [-20, 0, None, 150, 360]:
                        for tightness in [0, 100, 10000]:
                            if (tightness <= 0
                                    or (bpm is not None and bpm <= 0)
                                    or (start_bpm is not None
                                        and bpm is None
                                        and start_bpm <= 0)):
                                tf = raises(librosa.ParameterError)(__test)
                            else:
                                tf = __test

                            yield (tf, with_audio, with_tempo, start_bpm,
                                   bpm, trim, tightness)
def test_log_timestretch():
    def __test(n, lower, upper, jam):
        D = muda.deformers.LogspaceTimeStretch(n_samples=n, lower=lower,
                                               upper=upper)

        jam_orig = deepcopy(jam)

        n_samples = 0
        for jam_new in D.transform(jam):
            # Verify that the original jam reference hasn't changed
            assert jam_new is not jam
            __test_time(jam_orig, jam, 1.0)

            # Verify that the state and history objects are intact
            __test_deformer_history(D, jam_new.sandbox.muda.history[-1])

            d_state = jam_new.sandbox.muda.history[-1]['state']
            d_rate = d_state['rate']
            assert 2.0**lower <= d_rate <= 2.0**upper

            __test_time(jam_orig, jam_new, d_rate)
            n_samples += 1

        eq_(n, n_samples)

    for n in [1, 3, 5]:
        for lower in [-1, -0.5, 0.0]:
            for upper in [0.5, 1.0]:
                yield __test, n, lower, upper, jam_fixture

    for bad_samples in [-3, 0]:
        yield raises(ValueError)(__test), bad_samples, -1, 1, jam_fixture

    for bad_int in [(-1, -3), (2, 1)]:
        yield (raises(ValueError)(__test), 3, bad_int[0], bad_int[1],
               jam_fixture)
def test_lmeasure_fail_span():
    # Does not start at 0
    ref = [[[1, 10]],
           [[1, 5], [5, 10]]]
    ref_lab = [['A'], ['a', 'b']]
    ref = [np.asarray(_) for _ in ref]
    yield (raises(ValueError)(mir_eval.hierarchy.lmeasure),
           ref, ref_lab, ref, ref_lab)

    # Does not end at the right time
    ref = [[[0, 5]],
           [[0, 5], [5, 6]]]
    ref = [np.asarray(_) for _ in ref]
    yield (raises(ValueError)(mir_eval.hierarchy.lmeasure),
           ref, ref_lab, ref, ref_lab)

    # Two annotations of different shape
    ref = [[[0, 10]],
           [[0, 5], [5, 10]]]
    ref = [np.asarray(_) for _ in ref]
    est = [[[0, 15]],
           [[0, 5], [5, 15]]]
    est = [np.asarray(_) for _ in est]
    yield (raises(ValueError)(mir_eval.hierarchy.lmeasure),
           ref, ref_lab, est, ref_lab)