def test_json(self):
    """Verify Job JSON serialization and round-trip reconstruction.

    Checks that the serialized document carries exactly one measurement
    and one blob, and that a `Job` rebuilt from the document preserves
    the measurement quantities.
    """
    job_json = self.job.json
    # assertIsInstance reports the offending type on failure, unlike
    # assertTrue(isinstance(...)), which only reports "False is not true".
    self.assertIsInstance(job_json['measurements'], list)
    self.assertEqual(len(job_json['measurements']), 1)
    self.assertIsInstance(job_json['blobs'], list)
    self.assertEqual(len(job_json['blobs']), 1)

    # Rebuild from JSON; quantities must survive the round trip.
    job2 = Job.from_json(job_json)
    for m1, m2 in zip(self.job.measurements, job2.measurements):
        self.assertEqual(m1.quantity, m2.quantity)
class JobTestCase(unittest.TestCase):
    """Test Job classes."""

    def setUp(self):
        # A minimal job: one demo measurement, blobs attached implicitly.
        meas = DemoMeasurement()
        self.job = Job(measurements=[meas])

    def test_json(self):
        """Serialize the job and rebuild it from the JSON document."""
        job_json = self.job.json
        # assertIsInstance reports the offending type on failure, unlike
        # assertTrue(isinstance(...)), which only says "False is not true".
        self.assertIsInstance(job_json['measurements'], list)
        self.assertEqual(len(job_json['measurements']), 1)
        self.assertIsInstance(job_json['blobs'], list)
        self.assertEqual(len(job_json['blobs']), 1)

        # Rebuild from JSON; quantities must survive the round trip.
        job2 = Job.from_json(job_json)
        for m1, m2 in zip(self.job.measurements, job2.measurements):
            self.assertEqual(m1.quantity, m2.quantity)

    def test_roundtrip(self):
        """Write the job to disk as JSON and rebuild it from the file."""
        # Manually use temporary directories here,
        # because I can't figure out how to get py.test tmpdir fixture
        # to work in the unittest.TestCase context.
        tmp_dir = tempfile.mkdtemp()
        out_file_name = os.path.join(tmp_dir, "job_test.json")
        # try/finally so the temp file and directory are removed even when
        # an assertion fails; the previous unconditional cleanup at the end
        # of the test leaked them on failure.
        try:
            # Write JSON
            self.job.write_json(out_file_name)

            # Rebuild from JSON
            with open(out_file_name, 'r') as f:
                job2 = Job.from_json(json.load(f))
            for m1, m2 in zip(self.job.measurements, job2.measurements):
                self.assertEqual(m1.quantity, m2.quantity)
        finally:
            # Cleanup our temp files
            if os.path.exists(out_file_name):
                os.remove(out_file_name)
            os.removedirs(tmp_dir)
def test_json_deserialization(self):
    """Rebuild a measurement from its JSON document and compare fields
    (metric name, quantity, parameters, extras, linked blob datums)
    against the original.
    """
    job = Job(measurements=[self.meas])
    serialized = job.json
    meas_doc = serialized['measurements'][0]
    blob_docs = serialized['blobs']

    # Rebuild from JSON
    restored = DeserializedMeasurement.from_json(meas_doc,
                                                 blobs_json=blob_docs)

    self.assertEqual(self.meas.metric.name, restored.metric.name)
    self.assertEqual(self.meas.quantity, restored.quantity)

    for key, param in self.meas.parameters.items():
        self.assertEqual(param.quantity, restored.parameters[key].quantity)

    for key, extra in self.meas.extras.items():
        self.assertEqual(extra.quantity, restored.extras[key].quantity)

    for blob_key, blob in self.meas._linked_blobs.items():
        restored_blob = restored._linked_blobs[blob_key]
        for datum_key, datum in blob.datums.items():
            self.assertEqual(datum.quantity,
                             restored_blob.datums[datum_key].quantity)
def load_json_output(filepath):
    """Load a job object from a JSON file.

    This is a plain deserialization: nothing checks that the result is a
    well-formed `validate.base` job.

    Parameters
    ----------
    filepath : `str`
        Path of the JSON file to read.

    Returns
    -------
    job : A `validate.base.job` object.
        The job reconstructed from the file contents.
    """
    with open(filepath, 'r') as infile:
        return Job.from_json(json.load(infile))
def test_roundtrip(self):
    """Write the job to disk as JSON and rebuild it from the file."""
    # Manually use temporary directories here,
    # because I can't figure out how to get py.test tmpdir fixture
    # to work in the unittest.TestCase context.
    tmp_dir = tempfile.mkdtemp()
    out_file_name = os.path.join(tmp_dir, "job_test.json")
    # try/finally so the temp file and directory are removed even when an
    # assertion fails; the previous unconditional cleanup at the end of
    # the test leaked them on failure.
    try:
        # Write JSON
        self.job.write_json(out_file_name)

        # Rebuild from JSON
        with open(out_file_name, 'r') as f:
            job2 = Job.from_json(json.load(f))
        for m1, m2 in zip(self.job.measurements, job2.measurements):
            self.assertEqual(m1.quantity, m2.quantity)
    finally:
        # Cleanup our temp files
        if os.path.exists(out_file_name):
            os.remove(out_file_name)
        os.removedirs(tmp_dir)
def setUp(self):
    """Build a job containing a single demo measurement."""
    measurement = DemoMeasurement()
    self.job = Job(measurements=[measurement])
def runOneFilter(repo, visitDataIds, metrics, brightSnr=100,
                 makeJson=True, filterName=None, outputPrefix='',
                 useJointCal=False, verbose=False,
                 **kwargs):
    """Main executable for the case where there is just one filter.

    Plot files and JSON files are generated in the local directory
    prefixed with the repository name (where '_' replace path
    separators), unless overriden by specifying `outputPrefix`.
    E.g., Analyzing a repository "CFHT/output"
    will result in filenames that start with "CFHT_output_".

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one.
    visitDataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to
        reference. The `calexp` pixel image is needed for the photometric
        calibration unless useJointCal is True, in which the `photoCalib`
        and `wcs` datasets are used instead. Note that these have data
        IDs that include the tract number.
    metrics : `dict` or `collections.OrderedDict`
        Dictionary of `lsst.validate.base.Metric` instances. Typically
        this is data from ``validate_drp``'s ``metrics.yaml`` and loaded
        with `lsst.validate.base.load_metrics`.
    brightSnr : float, optional
        Minimum SNR for a star to be considered bright.
    makeJson : bool, optional
        Create JSON output file for metrics. Saved to current working
        directory.
    filterName : str, optional
        Name of the filter (bandpass).
    outputPrefix : str, optional
        Specify the beginning filename for output files.
    useJointCal : bool, optional
        Use jointcal/meas_mosaic outputs to calibrate positions and
        fluxes.
    verbose : bool, optional
        Output additional information on the analysis steps.
    **kwargs
        Additional keyword arguments; accepted and ignored by this
        function.

    Returns
    -------
    job : `Job`
        The job holding all blobs and measurements computed here.
    """
    # Match sources across the visits; this dataset feeds every
    # measurement and error model below.
    matchedDataset = MatchedMultiVisitDataset(repo, visitDataIds,
                                              useJointCal=useJointCal,
                                              verbose=verbose)
    photomModel = PhotometricErrorModel(matchedDataset)
    astromModel = AstrometricErrorModel(matchedDataset)
    # Blobs linked onto each measurement so the serialized job carries
    # the supporting error-model data.
    linkedBlobs = {'photomModel': photomModel, 'astromModel': astromModel}
    job = Job(blobs=[matchedDataset, photomModel, astromModel])

    # Astrometric metrics AM1-3; each AFx/ADx spec depends on the AMx
    # measurement registered on the job just above it.
    for x in (1, 2, 3):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        adxName = 'AD{0:d}'.format(x)

        AMxMeasurement(metrics[amxName], matchedDataset, filterName,
                       job=job, linkedBlobs=linkedBlobs, verbose=verbose)

        for specName in metrics[afxName].get_spec_names(
                filter_name=filterName):
            AFxMeasurement(metrics[afxName], matchedDataset,
                           job.get_measurement(amxName), filterName,
                           specName, job=job, linkedBlobs=linkedBlobs,
                           verbose=verbose)
            ADxMeasurement(metrics[adxName], matchedDataset,
                           job.get_measurement(amxName), filterName,
                           specName, job=job, linkedBlobs=linkedBlobs,
                           verbose=verbose)

    # Photometric repeatability: PA1 first, since PA2/PF1 are defined in
    # terms of the PA1 measurement.
    PA1Measurement(metrics['PA1'], matchedDataset, filterName,
                   job=job, linkedBlobs=linkedBlobs, verbose=verbose)

    for specName in metrics['PA2'].get_spec_names(filter_name=filterName):
        PA2Measurement(metrics['PA2'], matchedDataset,
                       pa1=job.get_measurement('PA1'),
                       filter_name=filterName, spec_name=specName,
                       verbose=verbose, job=job, linkedBlobs=linkedBlobs)

    for specName in metrics['PF1'].get_spec_names(filter_name=filterName):
        PF1Measurement(metrics['PF1'], matchedDataset,
                       job.get_measurement('PA1'), filterName, specName,
                       verbose=verbose, job=job, linkedBlobs=linkedBlobs)

    if makeJson:
        job.write_json(outputPrefix + '.json')

    return job