def test_json(self):
    """Simple test of the serialized JSON content of a metric."""
    name = 'T1'
    description = 'Test'
    unit = u.mag
    reference_doc = 'TEST-1'
    reference_page = 1
    reference_url = 'example.com'
    m = Metric(name, description, unit,
               tags=['tagA', 'tagB'],
               reference_doc=reference_doc,
               reference_url=reference_url,
               reference_page=reference_page)

    j = m.json
    self.assertEqual(j['name'], name)
    self.assertEqual(j['description'], description)
    self.assertEqual(j['unit'], unit)
    self.assertEqual(j['reference']['doc'], reference_doc)
    self.assertEqual(j['reference']['page'], reference_page)
    self.assertEqual(j['reference']['url'], reference_url)
    self.assertIn('tagA', j['tags'])
    self.assertIn('tagB', j['tags'])
    self.assertNotIn('tagC', j['tags'])

    # rebuild from json
    m2 = Metric.deserialize(**j)
    self.assertEqual(m, m2)
def test_setitem_delitem(self):
    """Test adding and deleting metrics."""
    m1 = Metric('validate_drp.test', 'test', '',
                reference_url='example.com',
                reference_doc='Doc', reference_page=1)
    metric_set = MetricSet()
    self.assertEqual(len(metric_set), 0)

    metric_set['validate_drp.test'] = m1
    self.assertEqual(len(metric_set), 1)
    self.assertEqual(metric_set['validate_drp.test'], m1)

    with self.assertRaises(KeyError):
        # inconsistent metric names
        metric_set['validate_drp.new_test'] = m1

    with self.assertRaises(TypeError):
        # Not a metric name
        n = Name('validate_drp')
        m2 = Metric(n, 'test', '')
        metric_set[n] = m2

    del metric_set['validate_drp.test']
    self.assertEqual(len(metric_set), 0)
def test_reference_string(self):
    """Verify reference property for different reference datasets."""
    m1 = Metric('test', 'test', '', reference_url='example.com',
                reference_doc='Doc', reference_page=1)
    self.assertEqual(m1.reference, 'Doc, p. 1, example.com')

    m2 = Metric('test', 'test', '', reference_url='example.com')
    self.assertEqual(m2.reference, 'example.com')

    m3 = Metric('test', 'test', '', reference_url='example.com',
                reference_doc='Doc')
    self.assertEqual(m3.reference, 'Doc, example.com')

    m4 = Metric('test', 'test', '', reference_doc='Doc',
                reference_page=1)
    self.assertEqual(m4.reference, 'Doc, p. 1')

    m5 = Metric('test', 'test', '', reference_doc='Doc')
    self.assertEqual(m5.reference, 'Doc')
def test_str(self):
    m1 = Metric('test', 'test docs', 'arcsec',
                reference_url='example.com',
                reference_doc='Doc', reference_page=1)
    self.assertEqual(str(m1), 'test (arcsec): test docs')

    m2 = Metric('test2', 'some words', '')
    self.assertEqual(
        str(m2),
        'test2 (dimensionless_unscaled): some words')
def setUp(self):
    self.job = Job()
    self.job.metrics.insert(
        Metric("foo.boringmetric", "", u.percent, tags=["redundant"]))
    self.job.metrics.insert(
        Metric("foo.fancymetric", "", u.meter, tags=["vital"]))
    self.job.measurements.insert(
        Measurement("foo.fancymetric", 2.0 * u.meter))
    self.job.measurements.insert(
        Measurement("foo.fanciermetric", 3.5 * u.second))
    self.job.measurements["foo.fanciermetric"].notes["fanciness"] \
        = "moderate"
    self.job.measurements.insert(
        Measurement("foo.fanciestmetric", 3.1415927 * u.kilogram))
    self.job.meta["bar"] = "high"
    self.job.meta["shape"] = "rotund"
    self.job.specs.insert(
        ThresholdSpecification("utterly_ridiculous", 1e10 * u.meter, ">"))
def test_metadataonly(self, mock_stdout):
    """Test that inspect_job can handle files with metadata but no
    measurements.
    """
    # Job and its components were not designed to support deletion, so
    # create a new Job from scratch to ensure it's a valid object.
    job = Job()
    job.metrics.insert(
        Metric("foo.boringmetric", "", u.percent, tags=["redundant"]))
    job.metrics.insert(
        Metric("foo.fancymetric", "", u.meter, tags=["vital"]))
    job.meta["bar"] = "high"
    job.meta["shape"] = "rotund"
    job.specs.insert(
        ThresholdSpecification("utterly_ridiculous", 1e10 * u.meter, ">"))

    inspect_job(job)

    output = mock_stdout.getvalue()
    for key, value in [("bar", "high"), ("shape", "rotund")]:
        self._check_metadata(key, value, output)
def test_insert(self):
    """Test MetricSet.insert."""
    m1 = Metric('validate_drp.test', 'test', '',
                reference_url='example.com',
                reference_doc='Doc', reference_page=1)
    metric_set = MetricSet()
    metric_set.insert(m1)
    self.assertEqual(m1, metric_set['validate_drp.test'])
def setUp(self):
    # Mock metrics
    self.metric_photrms = Metric('test.PhotRms', 'Photometric RMS', 'mmag')
    self.metric_photmed = Metric('test.PhotMedian', 'Median magnitude',
                                 'mag')
    self.metric_set = MetricSet([self.metric_photrms, self.metric_photmed])

    # Mock specifications
    self.spec_photrms_design = ThresholdSpecification(
        'test.PhotRms.design', 20. * u.mmag, '<')
    self.spec_set = SpecificationSet([self.spec_photrms_design])

    # Mock measurements
    self.meas_photrms = Measurement(self.metric_photrms, 15 * u.mmag,
                                    notes={'note': 'value'})
    self.meas_photrms.extras['n_stars'] = Datum(
        250, label='N stars',
        description='Number of stars included in RMS estimate')
    self.measurement_set = MeasurementSet([self.meas_photrms])

    # Metrics for Job 2
    self.metric_test_2 = Metric('test2.SourceCount', 'Source Count', '')
    self.blob_test_2 = Blob(
        'test2_blob',
        sn=Datum(50 * u.dimensionless_unscaled, label='S/N'))
    self.metric_set_2 = MetricSet([self.metric_test_2])

    # Specifications for Job 2
    self.spec_test_2 = ThresholdSpecification(
        'test2.SourceCount.design', 100 * u.dimensionless_unscaled, '>=')
    self.spec_set_2 = SpecificationSet([self.spec_test_2])

    # Measurements for Job 2
    self.meas_test_2_SourceCount = Measurement(
        self.metric_test_2, 200 * u.dimensionless_unscaled)
    self.meas_test_2_SourceCount.link_blob(self.blob_test_2)
    self.measurement_set_2 = MeasurementSet([self.meas_test_2_SourceCount])
def test_metricsonly(self, mock_stdout):
    """Test that inspect_job can handle files with metrics but no
    metadata.
    """
    # Job and its components were not designed to support deletion, so
    # create a new Job from scratch to ensure it's a valid object.
    job = Job()
    job.metrics.insert(
        Metric("foo.boringmetric", "", u.percent, tags=["redundant"]))
    job.metrics.insert(
        Metric("foo.fancymetric", "", u.meter, tags=["vital"]))
    job.measurements.insert(Measurement("foo.fancymetric", 2.0 * u.meter))
    job.measurements.insert(
        Measurement("foo.fanciermetric", 3.5 * u.second))
    job.measurements["foo.fanciermetric"].notes["fanciness"] = "moderate"
    job.measurements.insert(
        Measurement("foo.fanciestmetric", 3.1415927 * u.kilogram))

    inspect_job(job)

    output = mock_stdout.getvalue()
    # MeasurementSet.values does not exist
    for _, measurement in job.measurements.items():
        self._check_measurement(measurement, output)
def test_PA1_deferred_metric(self):
    """Test a measurement when the Metric instance is added later."""
    measurement = Measurement('PA1', 0.002 * u.mag)
    self.assertIsNone(measurement.metric)
    self.assertEqual(measurement.metric_name, Name(metric='PA1'))

    # Try adding in a metric with the wrong units to existing quantity
    other_metric = Metric('testing.other', 'Incompatible units', 'arcsec')
    with self.assertRaises(TypeError):
        measurement.metric = other_metric

    # Add metric in; the name should also update
    measurement.metric = self.pa1
    self.assertEqual(measurement.metric, self.pa1)
    self.assertEqual(measurement.metric_name, self.pa1.name)
def setUp(self):
    self.pa1 = Metric(
        'validate_drp.PA1',
        "The maximum rms of the unresolved source magnitude distribution "
        "around the mean value (repeatability).",
        'mmag',
        tags=['photometric', 'LPM-17'],
        reference_doc='LPM-17',
        reference_url='http://ls.st/lpm-17',
        reference_page=21)

    self.blob1 = Blob('Blob1')
    self.blob1['datum1'] = Datum(5 * u.arcsec, 'Datum 1')
    self.blob1['datum2'] = Datum(28. * u.mag, 'Datum 2')

    self.blob2 = Blob('Blob2')
    self.blob2['datumN'] = Datum(11 * u.dimensionless_unscaled, 'Count')
def test_iadd(self):
    """Test __iadd__ for merging metric sets."""
    m1 = Metric('validate_drp.test', 'test', '',
                reference_url='example.com',
                reference_doc='Doc', reference_page=1)
    new_metric_set = MetricSet([m1])

    self.metric_set += new_metric_set

    self.assertIn('validate_drp.test', self.metric_set)
    self.assertIn('testing.PA1', self.metric_set)
    self.assertIn('testing.PF1', self.metric_set)
    self.assertIn('testing.PA2', self.metric_set)
    self.assertIn('testing.AM1', self.metric_set)
def test_update(self):
    """Test MetricSet.update."""
    m1 = Metric('validate_drp.test', 'test', '',
                reference_url='example.com',
                reference_doc='Doc', reference_page=1)
    new_metric_set = MetricSet([m1])

    self.metric_set.update(new_metric_set)

    self.assertIn('validate_drp.test', self.metric_set)
    self.assertIn('testing.PA1', self.metric_set)
    self.assertIn('testing.PF1', self.metric_set)
    self.assertIn('testing.PA2', self.metric_set)
    self.assertIn('testing.AM1', self.metric_set)
def test_load_all_yaml_metrics(self):
    """Verify that all metrics from testing.yaml can be loaded."""
    for metric_name in self.metric_doc:
        m = Metric.deserialize(metric_name,
                               **self.metric_doc[metric_name])
        self.assertIsInstance(m, Metric)
def test_check_unit(self):
    m = Metric('test', '', 'marcsec')
    self.assertTrue(m.check_unit(5. * u.arcsec))
    self.assertTrue(m.check_unit(5. * u.marcsec))
    self.assertFalse(m.check_unit(5. * u.mag))
def setUp(self):
    self.m1 = Metric('pkgA.m1', 'In pkgA', '', tags=['testing'])
    self.m2 = Metric('pkgA.m2', 'In pkgA', '', tags=['other'])
    self.m3 = Metric('pkgB.m3', 'In pkgB', '', tags=['testing'])
    self.metric_set = MetricSet([self.m1, self.m2, self.m3])