def test_from_spectra(self, array):
    """Test stacking 1-D spectra into a spectrogram via ``from_spectra``."""
    min_ = self.TEST_ARRAY.min(axis=0)
    max_ = self.TEST_ARRAY.max(axis=0)
    mean = self.TEST_ARRAY.mean(axis=0)
    # check basic stack works
    new = self.TEST_ARRAY.from_spectra(mean, min_, max_, dt=1)
    assert new.shape == (3, min_.size)
    # frequency metadata and unit should be inherited from the first input
    assert new.name == mean.name
    assert new.epoch == mean.epoch
    assert new.f0 == mean.f0
    assert new.df == mean.df
    assert new.unit == mean.unit
    assert new.dt == 1 * units.second
    utils.assert_array_equal(
        new.value, numpy.vstack((mean.value, min_.value, max_.value)))
    # check kwargs override the inherited metadata
    new = self.TEST_ARRAY.from_spectra(mean, min_, max_, dt=2, epoch=0,
                                       f0=100, df=.5, unit='meter',
                                       name='test')
    assert new.name == 'test'
    assert new.epoch.gps == 0
    assert new.f0 == 100 * units.Hertz
    assert new.df == 0.5 * units.Hertz
    assert new.unit == units.meter
    # check error on timing: dt is required when it cannot be inferred
    with pytest.raises(ValueError):
        self.TEST_ARRAY.from_spectra(mean)
def _test_read_write(self, format, extension=None, auto=True,
                     exclude=None, readkwargs=None, writekwargs=None):
    """Helper method for testing unified I/O for `Array` instances

    Parameters
    ----------
    format : `str`
        the unified I/O format name to test

    extension : `str`, optional
        the file extension to use, defaults to ``format``

    auto : `bool`, optional
        if `True`, also test read/write without an explicit ``format``

    exclude : `list`, optional
        attributes to exclude from the equality assertion

    readkwargs, writekwargs : `dict`, optional
        extra keyword arguments to pass to ``read()`` / ``write()``
    """
    # create mutable defaults per-call: `[]`/`{}` defaults are shared
    # across calls and would leak state between tests
    if exclude is None:
        exclude = []
    if readkwargs is None:
        readkwargs = {}
    if writekwargs is None:
        writekwargs = {}
    if extension is None:
        extension = format
    extension = '.%s' % extension.lstrip('.')
    # use mkdtemp() rather than the deprecated, race-prone mktemp();
    # defining the path before `try` also keeps the finally block safe
    tmpdir = tempfile.mkdtemp()
    fp = os.path.join(tmpdir, 'array' + extension)
    try:
        # write (with and without explicit format)
        self.TEST_ARRAY.write(fp, format=format, **writekwargs)
        if auto:
            self.TEST_ARRAY.write(fp, **writekwargs)
        # read back (with and without explicit format) and compare
        b = self.TEST_ARRAY.read(fp, self.TEST_ARRAY.name,
                                 format=format, **readkwargs)
        if auto:
            self.TEST_ARRAY.read(fp, self.TEST_ARRAY.name, **readkwargs)
        utils.assert_array_equal(self.TEST_ARRAY, b, exclude=exclude)
        return b
    finally:
        if os.path.exists(fp):
            os.remove(fp)
        if os.path.isdir(tmpdir):
            os.rmdir(tmpdir)
def test_getitem(self, array, create_kwargs): array = self.create(name='test_getitem', **create_kwargs) # test element returns as quantity element = array[0, 0] assert element == array[0][0] assert isinstance(element, units.Quantity) utils.assert_quantity_equal(element, array.value[0, 0] * array.unit) # test column slice returns as _columnclass utils.assert_quantity_sub_equal(array[2], array[2, :]) column = array[0, 0::2] utils.assert_quantity_sub_equal(column, self.TEST_CLASS._columnclass( array.value[0, 0::2], x0=array.y0, dx=array.dy*2, name=array.name, channel=array.channel, unit=array.unit, epoch=array.epoch)) # test row slice returns as _rowclass row = array[1:10:3, 0] utils.assert_array_equal(row.value, array.value[1:10:3, 0]) utils.assert_quantity_sub_equal(row, self.TEST_CLASS._rowclass( array.value[1:10:3, 0], x0=array.x0+array.dx, dx=array.dx*3, name=array.name, channel=array.channel, unit=array.unit), exclude=['epoch']) # test dual slice returns type(self) with metadata subarray = array[1:5:2, 1:5:2] utils.assert_quantity_sub_equal(subarray, self.TEST_CLASS( array.value[1:5:2, 1:5:2], x0=array.x0+array.dx, dx=array.dx*2, y0=array.y0+array.dy, dy=array.dy*2, name=array.name, channel=array.channel, unit=array.unit), exclude=['epoch'])
def test_add_state_segments(self): fig, ax = self.new() # mock up some segments and add them as 'state' segments segs = SegmentList([Segment(1, 2), Segment(4, 5)]) segax = fig.add_state_segments(segs) # check that the new axes aligns with the parent utils.assert_array_equal(segax.get_position().intervalx, ax.get_position().intervalx) coll = segax.collections[0] for seg, path in zip(segs, coll.get_paths()): utils.assert_array_equal( path.vertices, [(seg[0], -.4), (seg[1], -.4), (seg[1], .4), (seg[0], .4), (seg[0], -.4)]) with pytest.raises(ValueError): fig.add_state_segments(segs, location='left') # test that this doesn't work with non-timeseries axes fig = self.FIGURE_CLASS() ax = fig.gca(projection='rectilinear') with pytest.raises(ValueError) as exc: fig.add_state_segments(segs) assert str(exc.value) == ("No 'timeseries' Axes found, cannot anchor " "new segment Axes.")
def test_init(self, array):
    """Check a freshly-created array carries the expected data and metadata."""
    # data and bin values should round-trip exactly
    utils.assert_array_equal(array.value, self.data)
    utils.assert_array_equal(array.bins.value, self.bins)
    # frequency (x) metadata
    assert array.x0 == 0 * units.Hertz
    assert array.df == 1 * units.Hertz
    # bin (y) metadata follows from the first two bin edges
    first_edge, second_edge = self.bins[0], self.bins[1]
    assert array.y0 == first_edge
    assert array.dy == second_edge - first_edge
def test_read_ligolw(self):
    """Test reading a `FrequencySeries` from a LIGO_LW XML file."""
    with tempfile.NamedTemporaryFile(mode='w+') as fobj:
        fobj.write(LIGO_LW_ARRAY)
        # read by name with a channel match
        array = FrequencySeries.read(
            fobj, 'psd', match={'channel': 'X1:TEST-CHANNEL_1'})
        utils.assert_array_equal(array, list(range(1, 11)) / units.Hz)
        utils.assert_array_equal(array.frequencies,
                                 list(range(10)) * units.Hz)
        assert numpy.isclose(array.epoch.gps, 1000000000)  # precision gah!
        assert array.unit == units.Hz**-1
        # second channel carries no epoch
        array2 = FrequencySeries.read(
            fobj, 'psd', match={'channel': 'X1:TEST-CHANNEL_2'})
        assert array2.epoch is None
        # assert errors
        with pytest.raises(ValueError):
            # no array by this name
            FrequencySeries.read(fobj, 'blah')
        with pytest.raises(ValueError):
            # ambiguous: multiple matching arrays
            FrequencySeries.read(fobj, 'psd')
        with pytest.raises(ValueError):
            # unknown match key
            FrequencySeries.read(fobj, 'psd', match={
                'channel': 'X1:TEST-CHANNEL_1',
                'blah': 'blah'
            })
def test_add_state_segments(self): fig, ax = self.new() # mock up some segments and add them as 'state' segments segs = SegmentList([Segment(1, 2), Segment(4, 5)]) segax = fig.add_state_segments(segs) # check that the new axes aligns with the parent utils.assert_array_equal(segax.get_position().intervalx, ax.get_position().intervalx) coll = segax.collections[0] for seg, path in zip(segs, coll.get_paths()): utils.assert_array_equal(path.vertices, [(seg[0], -.4), (seg[1], -.4), (seg[1], .4), (seg[0], .4), (seg[0], -.4)]) with pytest.raises(ValueError): fig.add_state_segments(segs, location='left') # test that this doesn't work with non-timeseries axes fig = self.FIGURE_CLASS() ax = fig.gca(projection='rectilinear') with pytest.raises(ValueError) as exc: fig.add_state_segments(segs) assert str(exc.value) == ("No 'timeseries' Axes found, cannot anchor " "new segment Axes.")
def test_to_file_from_file(self):
    """Round-trip an `EvaluationSheet` through a file and compare the data."""
    payload = np.array([[1, 0, 0, 1], [1, 1, 0, 0]])
    original = EvaluationSheet(payload)
    with temp_file() as fname:
        # write, read back, and compare the raw data arrays
        original.to_file(fname)
        restored = EvaluationSheet.from_file(fname)
        assert_array_equal(original.data, restored.data)
def test_getitem(self, array):
    """Check 2-D item access returns the right classes and units."""
    array = self.create()
    column = array[0]
    scalar = column[0]
    # single-index-then-index equals dual-index
    assert array[0, 0] == scalar
    # a column slice keeps values, type and unit
    utils.assert_array_equal(column.value, array.value[0])
    assert isinstance(column, self.TEST_CLASS._columnclass)
    assert column.unit == array.unit
    # a scalar element is a Quantity carrying the array's unit
    assert isinstance(scalar, units.Quantity)
    assert scalar.unit == array.unit
    # a row slice returns the row class
    assert isinstance(array[:, 0], self.TEST_CLASS._rowclass)
def test_filter(self, array):
    """Apply a ZPK filter and compare with a manually-built response."""
    filtered = array.filter([100], [1], 1e-2)
    # type and frequency index are preserved
    assert isinstance(filtered, type(array))
    utils.assert_quantity_equal(filtered.frequencies, array.frequencies)
    # manually rebuild the same filter and check the response matches
    numer, denom = signal.zpk2tf([100], [1], 1e-2)
    response = abs(signal.freqs(numer, denom, array.frequencies.value)[1])
    utils.assert_array_equal(filtered.value, response * array.value)
def test_coalesce(self):
    """Check that ``coalesce`` merges contiguous entries in place."""
    serieslist = self.TEST_CLASS()
    # entries at x0=0 and x0=5 are contiguous; x0=11 is separate
    for start in (0, 11, 5):
        serieslist.append(
            self.ENTRY_CLASS([1, 2, 3, 4, 5], x0=start, dx=1))
    serieslist.coalesce()
    assert len(serieslist) == 2
    assert serieslist[0].span == (0, 10)
    utils.assert_array_equal(serieslist[0].value,
                             [1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
def test_math(self, array):
    """Check that squaring squares data and unit, and keeps metadata."""
    array.override_unit('Hz')
    squared = array ** 2
    # the data and the unit are both raised to the power
    utils.assert_array_equal(squared.value, self.data ** 2)
    assert squared.unit == units.Hz ** 2
    # descriptive metadata is carried over unchanged
    for attr in ('name', 'epoch', 'channel'):
        assert getattr(squared, attr) == getattr(array, attr)
def test_math(self, array): array.override_unit('Hz') # test basic operations arraysq = array**2 utils.assert_array_equal(arraysq.value, self.data**2) assert arraysq.unit == units.Hz**2 assert arraysq.name == array.name assert arraysq.epoch == array.epoch assert arraysq.channel == array.channel
def test_plot(self, array):
    """Test simple plotting of a series produces the expected line data."""
    with rc_context(rc={'text.usetex': False}):
        plot = array.plot()
        assert isinstance(plot, TimeSeriesPlot)
        # the plotted line should carry the series' index and values
        line = plot.gca().lines[0]
        utils.assert_array_equal(line.get_xdata(), array.xindex.value)
        utils.assert_array_equal(line.get_ydata(), array.value)
        # check the figure can be rendered to file
        with tempfile.NamedTemporaryFile(suffix='.png') as f:
            plot.save(f.name)
        return plot  # allow subclasses to extend tests
def test_plot(self, array):
    """Test simple plotting of a `FrequencySeries`."""
    with rc_context(rc={'text.usetex': False}):
        plot = array.plot()
        # frequency-series data should get frequency-series plot/axes types
        assert isinstance(plot, FrequencySeriesPlot)
        assert isinstance(plot.gca(), FrequencySeriesAxes)
        # the plotted line should carry the series' index and values
        line = plot.gca().lines[0]
        utils.assert_array_equal(line.get_xdata(), array.xindex.value)
        utils.assert_array_equal(line.get_ydata(), array.value)
        # check the figure can be rendered to file
        with tempfile.NamedTemporaryFile(suffix='.png') as f:
            plot.save(f.name)
def test_CacheEvaluationListener():
    """Check the cache listener writes an evaluation readable from disk."""
    listener = CacheEvaluationListener()
    scores = BaseScoresheet({1: 10, 2: 5})
    evaluation = EvaluationSheet(scores, {1})
    # firing the signal should make the listener cache the evaluation
    smokesignal.emit('evaluation_finished', evaluation, 'd', 'p')
    cached = EvaluationSheet.from_file(listener.fname)
    assert_array_equal(evaluation.data, cached.data)
    # clean up the signal registry and the cache file
    smokesignal.clear_all()
    os.unlink(listener.fname)
def test_append(self, array): a2 = self.create(x0=array.xspan[1]) # test basic append a3 = array.append(a2, inplace=False) assert a3.epoch == array.epoch assert a3.x0 == array.x0 assert a3.size == array.size+a2.size assert a3.xspan == array.xspan+a2.xspan utils.assert_array_equal(a3.value[:array.shape[0]], array.value) utils.assert_array_equal(a3.value[-a2.shape[0]:], a2.value) # check that appending again causes a problem with pytest.raises(ValueError): a3.append(array) # test appending with one xindex deletes it in the output array.xindex a3 = array.append(a2, inplace=False) assert hasattr(a3, '_xindex') is False # test appending with both xindex appends as well array.xindex a2.xindex a3 = array.append(a2, inplace=False) assert hasattr(a3, '_xindex') utils.assert_array_equal( a3.xindex.value, numpy.concatenate((array.xindex.value, a2.xindex.value))) # test appending with one only and not resize del a2.xindex a3 = array.append(a2, inplace=False, resize=False) assert a3.x0 == array.x0 + array.dx * a2.shape[0] # test discontiguous appends - gap='raise' a3 = self.create(x0=array.xspan[1] + 1) ts4 = array.copy() with pytest.raises(ValueError): array.append(a3) # gap='ignore' ts4.append(a3, gap='ignore') assert ts4.shape[0] == array.shape[0] + a3.shape[0] utils.assert_array_equal( ts4.value, numpy.concatenate((array.value, a3.value))) # gap='pad' ts4 = array.copy() ts4.append(a3, gap='pad', pad=0) assert ts4.shape[0] == array.shape[0] + 1 + a3.shape[0] z = numpy.zeros((1,) + array.shape[1:]) utils.assert_array_equal( ts4.value, numpy.concatenate((array.value, z, a3.value)))
def test_new(self):
    """Test Array creation: empty constructor fails, data round-trips."""
    # constructing with no data is an error
    with pytest.raises(TypeError):
        self.TEST_CLASS()
    # constructing with data preserves the values
    fresh = self.create()
    utils.assert_array_equal(fresh.value, self.data)
    return fresh
def test_append(self, array): a2 = self.create(x0=array.xspan[1]) # test basic append a3 = array.append(a2, inplace=False) assert a3.epoch == array.epoch assert a3.x0 == array.x0 assert a3.size == array.size + a2.size assert a3.xspan == array.xspan + a2.xspan utils.assert_array_equal(a3.value[:array.shape[0]], array.value) utils.assert_array_equal(a3.value[-a2.shape[0]:], a2.value) # check that appending again causes a problem with pytest.raises(ValueError): a3.append(array) # test appending with one xindex deletes it in the output array.xindex a3 = array.append(a2, inplace=False) assert hasattr(a3, '_xindex') is False # test appending with both xindex appends as well array.xindex a2.xindex a3 = array.append(a2, inplace=False) assert hasattr(a3, '_xindex') utils.assert_array_equal( a3.xindex.value, numpy.concatenate((array.xindex.value, a2.xindex.value))) # test appending with one only and not resize del a2.xindex a3 = array.append(a2, inplace=False, resize=False) assert a3.x0 == array.x0 + array.dx * a2.shape[0] # test discontiguous appends - gap='raise' a3 = self.create(x0=array.xspan[1] + 1) ts4 = array.copy() with pytest.raises(ValueError): array.append(a3) # gap='ignore' ts4.append(a3, gap='ignore') assert ts4.shape[0] == array.shape[0] + a3.shape[0] utils.assert_array_equal(ts4.value, numpy.concatenate((array.value, a3.value))) # gap='pad' ts4 = array.copy() ts4.append(a3, gap='pad', pad=0) assert ts4.shape[0] == array.shape[0] + 1 + a3.shape[0] z = numpy.zeros((1, ) + array.shape[1:]) utils.assert_array_equal(ts4.value, numpy.concatenate((array.value, z, a3.value)))
def test_add_loudest(self, usetex, table):
    """Test ``Axes.add_loudest`` marker placement and text formatting."""
    table.add_column(table.Column(data=['test'] * len(table), name='test'))
    loudest = table[table['snr'].argmax()]
    t, f, s = loudest['time'], loudest['frequency'], loudest['snr']
    # make plot
    fig, ax = self.new()
    ax.scatter(table['time'], table['frequency'])
    tpos = ax.title.get_position()
    # call function
    coll, text = ax.add_loudest(
        table, 'snr',  # table, rank
        'time', 'frequency',  # x, y
        'test',  # extra columns to print
        'time',  # duplicate (shouldn't get printed)
    )
    # check marker was placed at the right point
    utils.assert_array_equal(coll.get_offsets(), [(t, f)])
    # check text
    result = ('Loudest event: Time = {0}, Frequency = {1}, SNR = {2}, '
              'Test = test'.format(*('{0:.2f}'.format(x)
                                     for x in (t, f, s))))
    assert text.get_text() == result
    assert text.get_position() == (.5, 1.)
    # assert title got moved
    assert ax.title.get_position() == (tpos[0], tpos[1] + .05)
    # -- with more kwargs
    _, t = ax.add_loudest(table, 'snr', 'time', 'frequency',
                          position=(0., 0.), ha='left', va='top')
    assert t.get_position() == (0., 0.)
    # assert title doesn't get moved again if we specify position
    assert ax.title.get_position() == (tpos[0], tpos[1] + .05)
    # assert kw handling
    assert t.get_horizontalalignment() == 'left'
    assert t.get_verticalalignment() == 'top'
    self.save_and_close(fig)
def test_prepend(self, array):
    """Test the `Series.prepend` method """
    later = self.create(x0=array.xspan[1]) * 2
    combined = later.prepend(array, inplace=False)
    # metadata follows the earlier (prepended) series
    assert combined.x0 == array.x0
    assert combined.size == array.size + later.size
    assert combined.xspan == array.xspan + later.xspan
    # prepending overlapping data again is an error
    with pytest.raises(ValueError):
        combined.prepend(array)
    # values: original data first, doubled copy second
    utils.assert_array_equal(combined.value[:array.shape[0]], array.value)
    utils.assert_array_equal(combined.value[-later.shape[0]:], later.value)
def test_normalize_fft_params(self):
    """Test :func:`gwpy.signal.fft.ui.normalize_fft_params`
    """
    # defaults: nfft matches the data size, no overlap
    ftp = fft_ui.normalize_fft_params(
        TimeSeries(numpy.zeros(1024), sample_rate=256))
    assert ftp == {'nfft': 1024, 'noverlap': 0}
    # naming a window implies a 50% overlap and a concrete window array
    ftp = fft_ui.normalize_fft_params(
        TimeSeries(numpy.zeros(1024), sample_rate=256),
        {'window': 'hann'})
    win = signal.get_window('hann', 1024)
    assert ftp.pop('nfft') == 1024
    assert ftp.pop('noverlap') == 512
    utils.assert_array_equal(ftp.pop('window'), win)
    # nothing else should remain in the parameter dict
    assert not ftp
def _test_read_write_ascii(self, format='txt'):
    """Helper method for testing ASCII read/write round-trips.

    Writes ``TEST_ARRAY`` to a temporary file in the given ``format``
    (with and without an explicit format argument), reads it back, and
    checks the values match.
    """
    extension = '.%s' % format.lstrip('.')
    # define the name before `try` so the finally block cannot raise
    # NameError if NamedTemporaryFile() itself fails
    fname = None
    try:
        with tempfile.NamedTemporaryFile(suffix=extension, mode='w',
                                         delete=False) as f:
            fname = f.name
            # write (with and without explicit format)
            self.TEST_ARRAY.write(fname, format=format)
            self.TEST_ARRAY.write(fname)
            # read back (with and without explicit format) and compare
            b = self.TEST_ARRAY.read(fname, format=format)
            self.TEST_ARRAY.read(fname)
            utils.assert_array_equal(self.TEST_ARRAY.value, b.value)
    finally:
        # remove the file even if an assertion above failed
        if fname is not None and os.path.exists(fname):
            os.remove(fname)
def test_read_pycbc_live(self):
    """Test reading an event table from a ``pycbc_live`` HDF5 file."""
    import h5py
    table = self.create(
        100, names=['a', 'b', 'c', 'chisq', 'd', 'e', 'f',
                    'mass1', 'mass2', 'snr'])
    table.meta['ifo'] = 'X1'
    fp = os.path.join(tempfile.mkdtemp(), 'X1-Live-0-0.hdf')
    try:
        # write table in pycbc_live format (by hand)
        with h5py.File(fp, 'w') as h5f:
            group = h5f.create_group('X1')
            for col in table.columns:
                group.create_dataset(data=table[col], name=col)
        # check that we can read
        t2 = self.TABLE.read(fp)
        utils.assert_table_equal(table, t2)
        # check keyword arguments result in same table
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live')
        utils.assert_table_equal(table, t2)
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live', ifo='X1')
        utils.assert_table_equal(table, t2)
        # add another IFO, then assert that reading the table without
        # specifying the IFO fails.
        # open with mode='a' explicitly: h5py >= 3 defaults to
        # read-only 'r', under which create_group() fails
        with h5py.File(fp, 'a') as h5f:
            h5f.create_group('Z1')
        with pytest.raises(ValueError) as exc:
            self.TABLE.read(fp)
        assert str(exc.value).startswith(
            'PyCBC live HDF5 file contains dataset groups')
        # but check that we can still read the original
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live', ifo='X1')
        utils.assert_table_equal(table, t2)
        # assert processed colums works
        t2 = self.TABLE.read(fp, ifo='X1', columns=['mchirp', 'new_snr'])
        mchirp = (table['mass1'] * table['mass2']) ** (3/5.) / (
            table['mass1'] + table['mass2']) ** (1/5.)
        utils.assert_array_equal(t2['mchirp'], mchirp)
        # test with selection
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live', ifo='X1',
                             selection='snr>.5')
        utils.assert_table_equal(filter_table(table, 'snr>.5'), t2)
    finally:
        if os.path.isdir(os.path.dirname(fp)):
            shutil.rmtree(os.path.dirname(fp))
def test_crop_frequencies(self): array = self.create(f0=0, df=1) # test simple array2 = array.crop_frequencies() utils.assert_quantity_sub_equal(array, array2) # test normal array2 = array.crop_frequencies(2, 5) utils.assert_array_equal(array2.value, array.value[:, 2:5]) assert array2.f0 == 2 * units.Hertz assert array2.df == array.df # test warnings with pytest.warns(UserWarning): array.crop_frequencies(array.yspan[0] - 1, array.yspan[1]) with pytest.warns(UserWarning): array.crop_frequencies(array.yspan[0], array.yspan[1] + 1)
def t(evaluation, dataset, predictor):
    """Listener stub: validate all arguments and record that it was called."""
    assert_equal(dataset, 'dataset')
    assert_is_instance(evaluation, EvaluationSheet)
    # confusion-matrix columns accumulated over the ranking
    for attr, expected in (('tp', [1, 1, 2, 2]),
                           ('fp', [0, 1, 1, 2]),
                           ('fn', [1, 1, 0, 0]),
                           ('tn', [2, 1, 1, 0])):
        assert_array_equal(getattr(evaluation, attr), expected)
    assert_equal(predictor, 'predictor')
    # flag for the enclosing test to verify the listener fired
    t.called = True
def test_add_loudest(self, usetex, table):
    """Test ``Axes.add_loudest`` marker placement and text formatting."""
    table.add_column(table.Column(data=['test'] * len(table), name='test'))
    loudest = table[table['snr'].argmax()]
    t, f, s = loudest['time'], loudest['frequency'], loudest['snr']
    # make plot
    fig, ax = self.new()
    ax.scatter(table['time'], table['frequency'])
    tpos = ax.title.get_position()
    # call function
    coll, text = ax.add_loudest(
        table, 'snr',  # table, rank
        'time', 'frequency',  # x, y
        'test',  # extra columns to print
        'time',  # duplicate (shouldn't get printed)
    )
    # check marker was placed at the right point
    utils.assert_array_equal(coll.get_offsets(), [(t, f)])
    # check text
    result = ('Loudest event: Time = {0}, Frequency = {1}, SNR = {2}, '
              'Test = test'.format(
                  *('{0:.2f}'.format(x) for x in (t, f, s))))
    assert text.get_text() == result
    assert text.get_position() == (.5, 1.)
    # assert title got moved
    assert ax.title.get_position() == (tpos[0], tpos[1] + .05)
    # -- with more kwargs
    _, t = ax.add_loudest(table, 'snr', 'time', 'frequency',
                          position=(0., 0.), ha='left', va='top')
    assert t.get_position() == (0., 0.)
    # assert title doesn't get moved again if we specify position
    assert ax.title.get_position() == (tpos[0], tpos[1] + .05)
    # assert kw handling
    assert t.get_horizontalalignment() == 'left'
    assert t.get_verticalalignment() == 'top'
    self.save_and_close(fig)
def test_resample(self, array):
    """Test ``StateVector.resample`` behaviour and error modes."""
    # check downsampling by factor of 2
    a2 = array.resample(array.sample_rate / 2.)
    assert a2.sample_rate == array.sample_rate / 2.
    assert a2.bits is array.bits
    utils.assert_array_equal(a2.value[:10],
                             [12, 0, 3, 0, 4, 0, 6, 5, 8, 0])
    # check upsampling raises NotImplementedError
    with pytest.raises(NotImplementedError):
        array.resample(array.sample_rate * 2.)
    # check resampling by non-integer factor raises error
    with pytest.raises(ValueError):
        array.resample(array.sample_rate * .75)
    with pytest.raises(ValueError):
        array.resample(array.sample_rate * 1.5)
def test_from_nds2_buffer(self):
    """Test ``from_nds2_buffer`` unpacks buffer metadata correctly."""
    nds_buffer = mocks.nds2_buffer('X1:TEST', self.data, 1000000000,
                                   self.data.shape[0], 'm')
    a = self.TEST_CLASS.from_nds2_buffer(nds_buffer)
    assert isinstance(a, self.TEST_CLASS)
    utils.assert_array_equal(a.value, self.data)
    # metadata should come straight from the buffer
    assert a.unit == units.m
    assert a.t0 == 1000000000 * units.s
    assert a.dt == units.s / self.data.shape[0]
    assert a.name == 'X1:TEST'
    assert a.channel == Channel('X1:TEST',
                                sample_rate=self.data.shape[0],
                                unit='m', type='raw', dtype='float32')
    # keyword arguments should override buffer metadata
    b = self.TEST_CLASS.from_nds2_buffer(nds_buffer, sample_rate=128)
    assert b.dt == 1 / 128. * units.s
def test_read_write_ligolw_property_columns(self):
    """Test LIGO_LW round-trip of GPS 'property' columns."""
    table = self.create(100, ['peak', 'snr', 'central_freq'],
                        ['f8', 'f4', 'f4'])
    with tempfile.NamedTemporaryFile(suffix='.xml') as f:
        # write table
        table.write(f, format='ligolw', tablename='sngl_burst')
        # read raw ligolw and check gpsproperty was unpacked properly
        # ('peak' should be split into seconds and nanoseconds columns)
        llw = io_ligolw.read_table(f, tablename='sngl_burst')
        for col in ('peak_time', 'peak_time_ns'):
            assert col in llw.columnnames
        with io_ligolw.patch_ligotimegps():
            utils.assert_array_equal(llw.get_peak(), table['peak'])
        # read table and assert gpsproperty was repacked properly
        t2 = self.TABLE.read(f, columns=table.colnames,
                             use_numpy_dtypes=True)
        utils.assert_table_equal(t2, table, almost_equal=True)
def test_new(self): """Test Array creation """ # test basic empty contructor with pytest.raises(TypeError): self.TEST_CLASS() # test with some data array = self.create() utils.assert_array_equal(array.value, self.data) # test that copy=True ensures owndata a = self.create(copy=False) assert self.create(copy=False).flags.owndata is False assert self.create(copy=True).flags.owndata is True # return array for subclasses to use return array
def test_plot(self, array):
    """Test ``StateVector.plot`` in segment and timeseries formats."""
    with rc_context(rc={'text.usetex': False}):
        plot = array.plot()
        assert isinstance(plot, TimeSeriesPlot)
        # make sure there were no lines drawn
        assert plot.gca().lines == []
        # assert one collection for each of known and active segmentlists
        assert len(plot.gca().collections) == len(array.bits) * 2
        with tempfile.NamedTemporaryFile(suffix='.png') as f:
            plot.save(f.name)
        plot.close()
        # test timeseries plotting as normal
        plot = array.plot(format='timeseries')
        assert isinstance(plot, TimeSeriesPlot)
        line = plot.gca().lines[0]
        utils.assert_array_equal(line.get_xdata(), array.xindex.value)
        utils.assert_array_equal(line.get_ydata(), array.value)
        plot.close()
def test_append(self, instance):
    """Test dict-level ``append`` for copy, contiguous, and padded data."""
    # test appending from empty (with and without copy)
    for copy in (True, False):
        new = type(instance)()
        new.append(instance, copy=copy)
        for key in new:
            assert numpy.shares_memory(new[key].value,
                                       instance[key].value) is not copy
            utils.assert_quantity_sub_equal(new[key], instance[key])
    # create copy of dict that is contiguous
    new = type(instance)()
    for key in instance:
        a = instance[key]
        new[key] = type(a)([1, 2, 3, 4, 5], x0=a.xspan[1], dx=a.dx,
                           dtype=a.dtype)
    # append and test
    b = instance.copy()
    b.append(new)
    for key in b:
        utils.assert_array_equal(
            b[key].value,
            numpy.concatenate((instance[key].value, new[key].value)))
    # create copy of dict that is discontiguous
    # (offset x0 by one extra sample so a genuine gap exists; without
    # the `+ 1` the data are contiguous and no error would be raised)
    new = type(instance)()
    for key in instance:
        a = instance[key]
        new[key] = type(a)([1, 2, 3, 4, 5], x0=a.xspan[1] + 1, dx=a.dx,
                           dtype=a.dtype)
    # check error
    with pytest.raises(ValueError):
        instance.append(new)
    # check padding works (don't validate too much, that is tested
    # elsewhere)
    b = instance.copy()
    b.append(new, pad=0)
def test_pad(self):
    """Test the `Series.pad` method
    """
    ts1 = self.create()
    # symmetric pad: 10 zeros on each side
    ts2 = ts1.pad(10)
    assert ts2.shape[0] == ts1.shape[0] + 20
    utils.assert_array_equal(
        ts2.value,
        numpy.concatenate((numpy.zeros(10), ts1.value, numpy.zeros(10))))
    # x0 moves back by the size of the leading pad
    assert ts2.x0 == ts1.x0 - 10 * ts1.x0.unit
    # test pre-pad: asymmetric (before, after) padding
    ts3 = ts1.pad((20, 10))
    assert ts3.size == ts1.size + 30
    utils.assert_array_equal(
        ts3.value,
        numpy.concatenate((numpy.zeros(20), ts1.value, numpy.zeros(10))))
    assert ts3.x0 == ts1.x0 - 20 * ts1.x0.unit
    # test bogus input
    with pytest.raises(ValueError):
        ts1.pad(-1)
def test_filter(self): array = self.create(t0=0, dt=1/1024., f0=0, df=1) # build filter zpk = [], [1], 1 lti = signal.lti(*zpk) fresp = numpy.nan_to_num(abs( lti.freqresp(w=array.frequencies.value)[1])) # test simple filter a2 = array.filter(*zpk) utils.assert_array_equal(array * fresp, a2) # test inplace filtering array.filter(lti, inplace=True) utils.assert_array_equal(array, a2) # test errors with pytest.raises(TypeError): array.filter(lti, blah=1)
def test_pad(self):
    """Test the `Series.pad` method
    """
    ts1 = self.create()
    # symmetric pad: 10 zeros on each side
    ts2 = ts1.pad(10)
    assert ts2.shape[0] == ts1.shape[0] + 20
    utils.assert_array_equal(
        ts2.value,
        numpy.concatenate((numpy.zeros(10), ts1.value, numpy.zeros(10))))
    # x0 moves back by the size of the leading pad
    assert ts2.x0 == ts1.x0 - 10*ts1.x0.unit
    # test pre-pad: asymmetric (before, after) padding
    ts3 = ts1.pad((20, 10))
    assert ts3.size == ts1.size + 30
    utils.assert_array_equal(
        ts3.value,
        numpy.concatenate((numpy.zeros(20), ts1.value, numpy.zeros(10))))
    assert ts3.x0 == ts1.x0 - 20*ts1.x0.unit
    # test bogus input
    with pytest.raises(ValueError):
        ts1.pad(-1)
def test_to_from_pycbc(self, array):
    """Test conversion to/from ``pycbc.types.FrequencySeries``."""
    from pycbc.types import FrequencySeries as PyCBCFrequencySeries
    array.epoch = 0
    # test default conversion
    pycbcfs = array.to_pycbc()
    assert isinstance(pycbcfs, PyCBCFrequencySeries)
    utils.assert_array_equal(array.value, pycbcfs.data)
    # NOTE(review): comparing a plain float to a Quantity here looks
    # suspicious (`array.f0.value == 0 * units.Hz`) — confirm intent
    assert array.f0.value == 0 * units.Hz
    assert array.df.value == pycbcfs.delta_f
    assert array.epoch.gps == pycbcfs.epoch
    # go back and check we get back what we put in in the first place
    a2 = type(array).from_pycbc(pycbcfs)
    utils.assert_quantity_sub_equal(
        array, a2, exclude=['name', 'unit', 'channel'])
    # test copy=False round-trips without copying the data buffer
    a2 = type(array).from_pycbc(array.to_pycbc(copy=False), copy=False)
    assert shares_memory(array.value, a2.value)
def test_join(self):
    """Test ``SeriesList.join`` with gaps and with an empty list."""
    a = self.TEST_CLASS()
    a.append(self.ENTRY_CLASS([1, 2, 3, 4, 5], x0=0, dx=1))
    a.append(self.ENTRY_CLASS([1, 2, 3, 4, 5], x0=5, dx=1))
    # third entry leaves a one-sample gap at x=10
    a.append(self.ENTRY_CLASS([1, 2, 3, 4, 5], x0=11, dx=1))
    # disjoint list should throw error
    with pytest.raises(ValueError):
        a.join()
    # but we can pad to get rid of the errors
    t = a.join(gap='pad')
    assert isinstance(t, a.EntryClass)
    assert t.span == (0, 16)
    utils.assert_array_equal(
        t.value, [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5])
    # check that joining empty list produces something sensible
    t = self.TEST_CLASS().join()
    assert isinstance(t, self.TEST_CLASS.EntryClass)
    assert t.size == 0
def test_f_score(self):
    """Test ``EvaluationSheet.f_score`` for default and custom beta."""
    sheet = EvaluationSheet(self.scores, relevant=self.rel)
    # default beta=1
    expected = np.array([0, 2 / 6, 4 / 7, 4 / 8, 6 / 9, 8 / 10, 8 / 11])
    assert_array_equal(sheet.f_score(), expected)
    # $F_\beta = \frac{\beta^2 + 1 |rel \cap ret|}{\beta^2 |rel| + |ret|}$
    expected = np.array([
        0, 1.25 * 1 / (0.25 * 4 + 2), 1.25 * 2 / (0.25 * 4 + 3),
        1.25 * 2 / (0.25 * 4 + 4), 1.25 * 3 / (0.25 * 4 + 5),
        1.25 * 4 / (0.25 * 4 + 6), 1.25 * 4 / (0.25 * 4 + 7)
    ])
    assert_array_equal(sheet.f_score(0.5), expected)
    # beta=2 weights recall more heavily
    expected = np.array([
        0, 5 * 1 / (4 * 4 + 2), 5 * 2 / (4 * 4 + 3), 5 * 2 / (4 * 4 + 4),
        5 * 3 / (4 * 4 + 5), 5 * 4 / (4 * 4 + 6), 5 * 4 / (4 * 4 + 7)
    ])
    assert_array_equal(sheet.f_score(2), expected)
def test_crop_frequencies(self): array = self.create(f0=0, df=1) # test simple array2 = array.crop_frequencies() utils.assert_quantity_sub_equal(array, array2) assert numpy.may_share_memory(array.value, array2.value) # test normal array2 = array.crop_frequencies(2, 5) utils.assert_array_equal(array2.value, array.value[:, 2:5]) assert array2.f0 == 2 * units.Hertz assert array2.df == array.df # test copy array2 = array.crop_frequencies(copy=True) assert not numpy.may_share_memory(array.value, array2.value) # test warnings with pytest.warns(UserWarning): array.crop_frequencies(array.yspan[0]-1, array.yspan[1]) with pytest.warns(UserWarning): array.crop_frequencies(array.yspan[0], array.yspan[1]+1)
def test_from_spectra(self, array):
    """Test stacking 1-D spectra into a spectrogram via ``from_spectra``."""
    min_ = self.TEST_ARRAY.min(axis=0)
    max_ = self.TEST_ARRAY.max(axis=0)
    mean = self.TEST_ARRAY.mean(axis=0)
    # check basic stack works
    new = self.TEST_ARRAY.from_spectra(mean, min_, max_, dt=1)
    assert new.shape == (3, min_.size)
    # frequency metadata and unit should be inherited from the first input
    assert new.name == mean.name
    assert new.epoch == mean.epoch
    assert new.f0 == mean.f0
    assert new.df == mean.df
    assert new.unit == mean.unit
    assert new.dt == 1 * units.second
    utils.assert_array_equal(
        new.value, numpy.vstack((mean.value, min_.value, max_.value)))
    # check kwargs override the inherited metadata
    new = self.TEST_ARRAY.from_spectra(
        mean, min_, max_,
        dt=2, epoch=0, f0=100, df=.5, unit='meter', name='test')
    assert new.name == 'test'
    assert new.epoch.gps == 0
    assert new.f0 == 100 * units.Hertz
    assert new.df == 0.5 * units.Hertz
    assert new.unit == units.meter
    # check error on timing: dt must be given if it cannot be inferred
    with pytest.raises(ValueError):
        self.TEST_ARRAY.from_spectra(mean)
    self.TEST_ARRAY.from_spectra(mean, dt=array.dt)
    # check error on inputs: mismatched sizes and spacings are rejected
    with pytest.raises(ValueError):
        self.TEST_ARRAY.from_spectra(mean, mean[1:])
    with pytest.raises(ValueError):
        self.TEST_ARRAY.from_spectra(mean, mean[::2])
def test_read_ligolw(self):
    """Test reading a `FrequencySeries` from a LIGO_LW XML file."""
    with tempfile.NamedTemporaryFile(mode='w+') as fobj:
        fobj.write(LIGO_LW_ARRAY)
        # read by name with a channel match
        array = FrequencySeries.read(
            fobj, 'psd', match={'channel': 'X1:TEST-CHANNEL_1'})
        utils.assert_array_equal(array, list(range(1, 11)) / units.Hz)
        utils.assert_array_equal(array.frequencies,
                                 list(range(10)) * units.Hz)
        assert numpy.isclose(array.epoch.gps, 1000000000)  # precision gah!
        assert array.unit == units.Hz ** -1
        # second channel carries no epoch
        array2 = FrequencySeries.read(
            fobj, 'psd', match={'channel': 'X1:TEST-CHANNEL_2'})
        assert array2.epoch is None
        # assert errors
        with pytest.raises(ValueError):
            # no array by this name
            FrequencySeries.read(fobj, 'blah')
        with pytest.raises(ValueError):
            # ambiguous: multiple matching arrays
            FrequencySeries.read(fobj, 'psd')
        with pytest.raises(ValueError):
            # unknown match key
            FrequencySeries.read(
                fobj, 'psd',
                match={'channel': 'X1:TEST-CHANNEL_1', 'blah': 'blah'})
def test_init(self):
    """Test `EvaluationSheet` construction with and without a universe."""
    # without a universe, tn is undefined (-1 sentinel column)
    sheet = EvaluationSheet(self.scores, relevant=self.rel)
    expected = np.array([[0, 1, 2, 2, 3, 4, 4],
                         [1, 1, 1, 2, 2, 2, 3],
                         [4, 3, 2, 2, 1, 0, 0],
                         [-1, -1, -1, -1, -1, -1, -1]]).T
    assert_array_equal(sheet.data, expected)
    # with an explicit universe, tn is computed
    sheet = EvaluationSheet(self.scores, relevant=self.rel,
                            universe=self.universe)
    expected = np.array([[0, 1, 2, 2, 3, 4, 4],
                         [1, 1, 1, 2, 2, 2, 3],
                         [4, 3, 2, 2, 1, 0, 0],
                         [15, 15, 15, 14, 14, 14, 13]]).T
    assert_array_equal(sheet.data, expected)
    # a numeric universe size should behave like the explicit universe
    sheet = EvaluationSheet(self.scores, relevant=self.rel,
                            universe=self.num_universe)
    # Same expected applies as above
    assert_array_equal(sheet.data, expected)
    # raw data passed directly should be stored as-is
    data = np.array([[1, 0, 0, 1], [1, 1, 0, 0]])
    sheet = EvaluationSheet(data)
    assert_array_equal(sheet.data, data)
def test_yindex(self, array):
    """The y-index should be the left edge of each bin."""
    left_edges = array.bins[:-1]
    utils.assert_array_equal(array.yindex, left_edges)
def test_zip(self, array):
    """``zip`` should pair x-index values with data values column-wise."""
    zipped = array.zip()
    expected = numpy.column_stack((array.xindex.value, array.value))
    utils.assert_array_equal(zipped, expected)
def test_measures(self):
    """Test evaluation measures with and without a universe."""
    sheet_num_universe = EvaluationSheet(self.scores, relevant=self.rel,
                                         universe=self.num_universe)
    sheet_universe = EvaluationSheet(self.scores, relevant=self.rel,
                                     universe=self.universe)
    sheet_no_universe = EvaluationSheet(self.scores, relevant=self.rel)
    # Measures that don't require universe
    for sheet in (sheet_num_universe, sheet_universe, sheet_no_universe):
        assert_array_equal(sheet.precision(),
                           np.array([0, 0.5, 2 / 3, 0.5, 3 / 5, 2 / 3,
                                     4 / 7]))
        assert_array_equal(sheet.recall(),
                           np.array([0, 0.25, 0.5, 0.5, 0.75, 1, 1]))
    # Measures that do require universe
    for sheet in (sheet_num_universe, sheet_universe):
        # XXX The following ones look wrong?!
        expected = np.array([1 / 16, 1 / 16, 1 / 16, 1 / 8, 1 / 8,
                             1 / 8, 3 / 16])
        assert_array_equal(sheet.fallout(), expected)
        expected = np.array([4 / 19, 3 / 18, 2 / 17, 2 / 16, 1 / 15,
                             0, 0])
        assert_array_equal(sheet.miss(), expected)
        expected = np.array([0.75, 0.8, 17 / 20, 0.8, 17 / 20, 0.9,
                             17 / 20])
        assert_array_equal(sheet.accuracy(), expected)
        assert_array_equal(sheet.generality(), 0.2)
    # without a universe these measures are undefined and must raise
    assert_raises(UndefinedError, sheet_no_universe.fallout)
    assert_raises(UndefinedError, sheet_no_universe.miss)
    assert_raises(UndefinedError, sheet_no_universe.accuracy)
    assert_raises(UndefinedError, sheet_no_universe.generality)
def test_get_column(self, table):
    """``get_column`` should match direct column indexing."""
    by_method = table.get_column('snr')
    by_key = table['snr']
    utils.assert_array_equal(by_method, by_key)
def test_read_pycbc_live(self):
    """Test reading a ``pycbc_live`` HDF5 file, including extended metadata."""
    import h5py
    table = self.create(
        100, names=['a', 'b', 'c', 'chisq', 'd', 'e', 'f',
                    'mass1', 'mass2', 'snr'])
    loudest = (table['snr'] > 500).nonzero()[0]
    psd = FrequencySeries(random.randn(1000), df=1)
    fp = os.path.join(tempfile.mkdtemp(), 'X1-Live-0-0.hdf')
    try:
        # write table in pycbc_live format (by hand)
        with h5py.File(fp, 'w') as h5f:
            group = h5f.create_group('X1')
            for col in table.columns:
                group.create_dataset(data=table[col], name=col)
            group.create_dataset('loudest', data=loudest)
            group.create_dataset('psd', data=psd.value)
            group['psd'].attrs['delta_f'] = psd.df.to('Hz').value
        # check that we can read
        t2 = self.TABLE.read(fp)
        utils.assert_table_equal(table, t2)
        # and check metadata was recorded correctly
        assert t2.meta['ifo'] == 'X1'
        # check keyword arguments result in same table
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live')
        utils.assert_table_equal(table, t2)
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live', ifo='X1')
        # assert loudest works
        t2 = self.TABLE.read(fp, loudest=True)
        utils.assert_table_equal(table.filter('snr > 500'), t2)
        # check extended_metadata=True works (default)
        t2 = self.TABLE.read(fp, extended_metadata=True)
        utils.assert_table_equal(table, t2)
        utils.assert_array_equal(t2.meta['loudest'], loudest)
        utils.assert_quantity_sub_equal(
            t2.meta['psd'], psd,
            exclude=['name', 'channel', 'unit', 'epoch'])
        # check extended_metadata=False works
        t2 = self.TABLE.read(fp, extended_metadata=False)
        assert t2.meta == {'ifo': 'X1'}
        # double-check that loudest and extended_metadata=False work
        t2 = self.TABLE.read(fp, loudest=True, extended_metadata=False)
        utils.assert_table_equal(table.filter('snr > 500'), t2)
        assert t2.meta == {'ifo': 'X1'}
        # add another IFO, then assert that reading the table without
        # specifying the IFO fails.
        # open with mode='a' explicitly: h5py >= 3 defaults to
        # read-only 'r', under which create_group() fails
        with h5py.File(fp, 'a') as h5f:
            h5f.create_group('Z1')
        with pytest.raises(ValueError) as exc:
            self.TABLE.read(fp)
        assert str(exc.value).startswith(
            'PyCBC live HDF5 file contains dataset groups')
        # but check that we can still read the original
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live', ifo='X1')
        utils.assert_table_equal(table, t2)
        # assert processed colums works
        t2 = self.TABLE.read(fp, ifo='X1', columns=['mchirp', 'new_snr'])
        mchirp = (table['mass1'] * table['mass2']) ** (3/5.) / (
            table['mass1'] + table['mass2']) ** (1/5.)
        utils.assert_array_equal(t2['mchirp'], mchirp)
        # test with selection
        t2 = self.TABLE.read(fp, format='hdf5.pycbc_live', ifo='X1',
                             selection='snr>.5')
        utils.assert_table_equal(filter_table(table, 'snr>.5'), t2)
    finally:
        if os.path.isdir(os.path.dirname(fp)):
            shutil.rmtree(os.path.dirname(fp))