def test_estimation():
    """Background estimation should reproduce the analytic count rate."""
    stride, integration = 10, 10
    confidence = 0.8
    estimator = BackgroundEstimator(confidence=confidence)

    # Drive the handler with the estimator attached as the analysis object.
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, store_data=True,
                          analysis=estimator)
    classifier.run_all()
    estimator.estimate()

    # Analytic expectation: counts * integration / live-time; the counts in
    # the first window sum to (n-1)^2 + (n-1) over twice the live-time.
    exp_rate = (((integration - 1)**2 + (integration - 1)) /
                (2 * test_data.livetime)) * test_data.energy_bins
    np.testing.assert_almost_equal(estimator.background[0][1],
                                   exp_rate, decimal=3)

    # The reported timestamp should match the first zero entry in spectra.
    zero_idx = np.where(spectra == 0)[0][0]
    np.testing.assert_equal(estimator.background[0][0], timestamps[zero_idx])

    # Only the lowest (1 - confidence) fraction of windows is retained.
    exp_count = round((test_data.timesteps / integration) * (1 - confidence))
    np.testing.assert_equal(len(estimator.background), exp_count)
def test_stop():
    """Processing must halt at stop_time, yielding one fewer result."""
    # arbitrary period count, chosen to stay below the number of timestamps
    n_periods = 10
    window = int(test_data.timesteps / n_periods)
    stride, integration = window, window
    cache_size = 100

    # stop just after n-1 full integration periods -> n-1 results expected
    stop_time = timestamps[integration * (n_periods - 1) + 1]

    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, store_data=True,
                          cache_size=cache_size, stop_time=stop_time)
    classifier.run_all()

    # closed form for the counts inside the final processed window
    last = integration * (n_periods - 1)
    prev = integration * (n_periods - 2)
    window_counts = (last * (last - 1) / 2) - (prev * (prev - 1) / 2)
    exp_row = (np.full((test_data.energy_bins,), window_counts)
               / test_data.livetime)

    np.testing.assert_almost_equal(classifier.storage[-1, 1:], exp_row,
                                   decimal=2)
    np.testing.assert_equal(len(classifier.storage), n_periods - 1)
def test_write():
    """BackgroundEstimator.write should round-trip results through CSV.

    NOTE(review): other test modules here also define ``test_write``; if
    these files are ever merged into one module, pytest will only collect
    the last definition -- consider renaming.
    """
    stride = 10
    integration = 10
    confidence = 0.8
    bckg = BackgroundEstimator(confidence=confidence)

    # run handler script
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, store_data=True, analysis=bckg)
    classifier.run_all()

    ofilename = 'bckg_test'
    bckg.write(ofilename=ofilename)
    try:
        results = np.loadtxt(fname=ofilename + '.csv', delimiter=',')

        # the resulting observation should be:
        # counts * integration / live-time
        expected = (((integration - 1)**2 + (integration - 1)) /
                    (2 * test_data.livetime)) * test_data.energy_bins
        np.testing.assert_almost_equal(results[0][1], expected, decimal=3)

        # timestamp of the first result matches the first zero in spectra
        time_idx = np.where(spectra == 0)[0][0]
        np.testing.assert_equal(results[0][0], timestamps[time_idx])

        # only the lowest (1 - confidence) fraction of windows is written
        expected_num = round(
            (test_data.timesteps / integration) * (1 - confidence))
        np.testing.assert_equal(results.shape[0], expected_num)
    finally:
        # remove the temporary CSV even if an assertion fails
        os.remove(ofilename + '.csv')
def test_write():
    """DiffSpectra.write output shape matches stored results minus stride."""
    stride = 10
    integration = 10
    filename = 'DiffSpectra_test.csv'

    # run handler script with analysis parameter
    # small stride since there are fewer data samples in test_data
    diff_stride = 2
    post_analysis = DiffSpectra(stride=diff_stride)
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, post_analysis=post_analysis,
                          store_data=True)
    classifier.run_all()

    post_analysis.write(filename)
    try:
        results = np.loadtxt(filename, delimiter=',')
        # one extra column is required for the timestamp; one row per
        # stored spectrum that has diff_stride spectra preceding it
        obs = results.shape
        exp = (classifier.storage.shape[0] - diff_stride,
               test_data.energy_bins + 1)
        np.testing.assert_equal(obs, exp)
    finally:
        # remove the temporary CSV even if the shape assertion fails
        os.remove(filename)
def test_analysis():
    """The attached analysis object must be invoked during run_all."""
    window = int(test_data.timesteps / 10)

    classifier = RadClass(window, window, test_data.datapath,
                          test_data.filename, analysis=NullAnalysis(),
                          store_data=False)
    classifier.run_all()

    # the analysis hook should have run, setting `changed` on the instance
    np.testing.assert_equal(True, classifier.analysis.changed)
def test_equal_start_stop():
    """start_time == stop_time should raise a RuntimeWarning."""
    window = int(test_data.timesteps / 10)

    # identical start and stop bounds leave no data interval to process
    boundary = timestamps[0]
    classifier = RadClass(window, window, test_data.datapath,
                          test_data.filename, start_time=boundary,
                          stop_time=boundary)
    with pytest.raises(RuntimeWarning):
        classifier.queue_file()
def test_integration():
    """A single full-window integration matches the analytic count rate."""
    window = int(test_data.timesteps / 10)

    classifier = RadClass(window, window, test_data.datapath,
                          test_data.filename, store_data=True)
    classifier.run_all()

    # counts over the first window sum to 0 + 1 + ... + (n-1) = n(n-1)/2;
    # the resulting observation is counts * integration / live-time
    exp_rate = (np.full((test_data.energy_bins,),
                        window * (window - 1) / 2)
                / test_data.livetime)
    observed = classifier.storage[0][1:][0]
    np.testing.assert_almost_equal(observed, exp_rate, decimal=2)
def test_difference():
    """DiffSpectra length, values, and timestamps match the analytic model."""
    stride = 10
    integration = 10

    # keep the analysis stride small: test_data has few samples
    diff_stride = 2
    analysis = DiffSpectra(stride=diff_stride)
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, post_analysis=analysis,
                          store_data=True)
    classifier.run_all()
    spectra_out = analysis.diff_spectra

    # Length: one difference spectrum per timestamp that has diff_stride
    # spectra preceding it.
    exp_rows = classifier.storage.shape[0] - diff_stride
    np.testing.assert_equal(spectra_out.shape[0], exp_rows)

    # Values: test_data spectra increase (1, 2, 3, 4, ...), so the minimum
    # background in each window is always its first spectrum and every
    # element equals spectra[i] - spectra[i - diff_stride].  With
    #   n1 = integration + diff_stride * stride
    #   n2 = integration + (diff_stride - 1) * stride
    #   n3 = integration
    # the count difference is (n1^2 + n1) - (n2^2 + n2) - (n3^2 + n3),
    # which simplifies algebraically to the expression below.
    delta = ((2 * diff_stride * stride**2) - stride**2 - integration**2
             + (2 * stride * integration) + stride - integration)
    exp_values = np.full((exp_rows, test_data.energy_bins),
                         delta / (2 * test_data.livetime))
    np.testing.assert_almost_equal(spectra_out[:, 1:], exp_values, decimal=2)

    # Timestamps: every stored timestamp after the first diff_stride rows.
    np.testing.assert_equal(spectra_out[:, 0],
                            classifier.storage[diff_stride:, 0])
def test_gross():
    """H0 should reject exactly one hypothesis, at the injected time."""
    stride = 10
    integration = 10

    # run handler script with the hypothesis-test analysis attached
    analysis = H0()
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, analysis=analysis)
    classifier.run_all()

    # the trigger timestamp lines up with the injected rejection time
    exp_time = timestamps[-(rejected_H0_time + integration)]
    np.testing.assert_equal(analysis.triggers[0][0], exp_time)

    # exactly one rejected hypothesis is expected
    np.testing.assert_equal(analysis.triggers.shape[0], 1)
def test_stride():
    """Stride larger than integration skips samples between windows."""
    stride = 10
    integration = 5

    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename)
    classifier.run_all()

    # Second window covers samples [stride, stride + integration), so its
    # counts are T(stride+integration-1) - T(stride-1) with T(k) = k(k+1)/2;
    # the observation is counts * integration / live-time.
    window_counts = ((stride + integration) * (stride + integration - 1) / 2
                     - stride * (stride - 1) / 2)
    exp_row = (np.full((test_data.energy_bins,), window_counts)
               / test_data.livetime)
    exp_rows = int(test_data.timesteps / stride)

    np.testing.assert_almost_equal(classifier.storage[1, 1:], exp_row,
                                   decimal=2)
    np.testing.assert_equal(len(classifier.storage), exp_rows)
def test_write_gross():
    """H0.write (gross mode) emits one 4-column row for the single trigger."""
    stride = 10
    integration = 10
    filename = 'h0test_gross.csv'

    # run handler script with analysis parameter
    analysis = H0()
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, analysis=analysis)
    classifier.run_all()

    analysis.write(filename)
    try:
        results = np.loadtxt(filename, delimiter=',')
        # expected shape is only 1D because only 1 entry is expected
        np.testing.assert_equal(results.shape, (4, ))
    finally:
        # remove the temporary CSV even if the assertion fails
        os.remove(filename)
def test_write_channel():
    """H0.write (per-channel mode) emits energy_bins + 1 columns."""
    stride = 10
    integration = 10
    filename = 'h0test_channel.csv'

    # run handler script with per-channel hypothesis testing
    analysis = H0(gross=False, energy_bins=test_data.energy_bins)
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename, analysis=analysis)
    classifier.run_all()

    analysis.write(filename)
    try:
        results = np.loadtxt(filename, delimiter=',')
        # 1 extra column is required for the timestamp;
        # expected shape is only 1D because only 1 entry is expected
        np.testing.assert_equal(results.shape,
                                (test_data.energy_bins + 1, ))
    finally:
        # remove the temporary CSV even if the assertion fails
        os.remove(filename)
def test_start():
    """start_time one period in should drop the first result window."""
    num_results = 10
    window = int(test_data.timesteps / num_results)
    cache_size = 100

    # begin processing one full integration period into the data
    start_time = timestamps[window]

    classifier = RadClass(window, window, test_data.datapath,
                          test_data.filename, store_data=True,
                          cache_size=cache_size, start_time=start_time)
    classifier.run_all()

    # first processed window spans samples [window, 2*window):
    # counts = T(2w - 1) - T(w - 1) with T(k) = k(k+1)/2
    window_counts = ((2 * window) * (2 * window - 1) / 2
                     - window * (window - 1) / 2)
    exp_row = (np.full((test_data.energy_bins,), window_counts)
               / test_data.livetime)

    np.testing.assert_almost_equal(classifier.storage[0, 1:], exp_row,
                                   decimal=2)
    np.testing.assert_equal(len(classifier.storage), num_results - 2)
def test_init():
    """Constructor arguments are stored verbatim on the instance."""
    stride = int(test_data.timesteps / 10)
    integration = int(test_data.timesteps / 10)
    store_data = True
    cache_size = 10000
    stop_time = 2e9

    classifier = RadClass(stride=stride, integration=integration,
                          datapath=test_data.datapath,
                          filename=test_data.filename,
                          store_data=store_data, cache_size=cache_size,
                          stop_time=stop_time, labels=test_data.labels)

    # every keyword should come back unchanged as an attribute
    checks = [(stride, classifier.stride),
              (integration, classifier.integration),
              (test_data.datapath, classifier.datapath),
              (test_data.filename, classifier.filename),
              (cache_size, classifier.cache_size),
              (stop_time, classifier.stop_time),
              (test_data.labels, classifier.labels)]
    for expected, observed in checks:
        np.testing.assert_equal(expected, observed)
def test_write():
    """RadClass.write round-trips the single stored result through CSV."""
    stride = 60
    integration = 60
    filename = 'test_results.csv'

    # run handler script
    classifier = RadClass(stride, integration, test_data.datapath,
                          test_data.filename)
    classifier.run_all()

    classifier.write(filename)
    try:
        # the resulting 1-hour observation should be:
        # counts * integration / live-time
        expected = (np.full((test_data.energy_bins,),
                            integration * (integration - 1) / 2)
                    / test_data.livetime)
        # results array is only 1D because only one entry is expected
        # for test_data.timesteps; [1:] strips the timestamp column
        results = np.loadtxt(filename, delimiter=',')[1:]
        np.testing.assert_almost_equal(results, expected, decimal=2)
    finally:
        # remove the temporary CSV even if the assertion fails
        os.remove(filename)
def test_bad_start_stop():
    """Invalid start/stop bounds must raise ValueError from queue_file."""
    window = int(test_data.timesteps / 10)

    def expect_value_error(**bounds):
        # build a classifier with the given bounds and expect a ValueError
        classifier = RadClass(window, window, test_data.datapath,
                              test_data.filename, **bounds)
        with pytest.raises(ValueError):
            classifier.queue_file()

    # start_time after stop_time
    expect_value_error(start_time=timestamps[1], stop_time=timestamps[0])
    # start_time beyond the final timestamp
    expect_value_error(start_time=timestamps[-1] + 1000.0)
    # stop_time before the first timestamp
    expect_value_error(stop_time=timestamps[0] - 1000.0)