def test_read_write_new_ant_json_files():
    """Test the new ant_metric json storage can be read and written to hdf5."""
    json_infile = os.path.join(DATA_PATH, 'example_ant_metrics.json')
    outfile = os.path.join(DATA_PATH, 'test_output',
                           'test_ant_json_to_hdf5.h5')
    expected_warning = [
        "JSON-type files can still be read but are no longer "
        "written by default.\n"
        "Write to HDF5 format for future compatibility.",
    ]
    # Loading the legacy JSON file should emit exactly one
    # PendingDeprecationWarning with the message above.
    json_metrics = uvtest.checkWarnings(metrics_io.load_metric_file,
                                        func_args=[json_infile],
                                        category=PendingDeprecationWarning,
                                        nwarnings=1,
                                        message=expected_warning)

    # Round-trip through the HDF5 writer/reader.
    metrics_io.write_metric_file(outfile, json_metrics, overwrite=True)
    hdf5_metrics = metrics_io.load_metric_file(outfile)

    # 'history' and 'version' are expected to differ between the two
    # storage formats by design, so drop them before comparing.
    for metrics in (json_metrics, hdf5_metrics):
        metrics.pop('history', None)
        metrics.pop('version', None)

    # recursive_compare_dicts walks both dictionaries and compares the
    # values (numeric data via np.allclose).
    assert qmtest.recursive_compare_dicts(json_metrics, hdf5_metrics)
    assert os.path.exists(outfile)
    os.remove(outfile)
def test_boolean_read_write_hdf5():
    """Test a Boolean type is preserved in read write loop: hdf5."""
    test_file = os.path.join(DATA_PATH, 'test_output', 'test_bool.h5')
    test_bool = True
    test_dict = {'good_sol': test_bool}
    metrics_io.write_metric_file(test_file, test_dict, overwrite=True)
    input_dict = metrics_io.load_metric_file(test_file)
    # BUG FIX: the original `assert a, b` only asserted `a` truthy with `b`
    # as the failure message; it never compared the round-tripped value.
    # Compare the stored and re-loaded values explicitly.
    assert test_dict['good_sol'] == input_dict['good_sol']
    # The value must still read back as a boolean (numpy or builtin).
    assert isinstance(input_dict['good_sol'], (np.bool_, bool))
    os.remove(test_file)
def test_write_then_load_metric_file_hdf5():
    """Test loaded in map is same as written one from hdf5."""
    test_file = os.path.join(DATA_PATH, 'test_output', 'test.h5')
    test_scalar = 'hello world'
    good_dict = {
        '0': test_scalar,
        'history': "this is a test",
        'version': hera_qm_version_str,
        'all_metrics': {
            '0': test_scalar
        }
    }
    metrics_io.write_metric_file(test_file, good_dict)
    read_dict = metrics_io.load_metric_file(test_file)
    # Plain `==` handles both the nested dict and the scalar entries, and
    # keeps this test consistent with the bare asserts used elsewhere in
    # this module (replaces the deprecated nose `nt.assert_*` helpers).
    for key in good_dict:
        assert good_dict[key] == read_dict[key]
    os.remove(test_file)
def test_run_metrics_two_pols(firstcal_twopol):
    """Compare two-polarization firstcal metrics to stored reference results."""
    # The reference results were generated with a seed of 0; the seed
    # shouldn't matter, but you never know.
    known_file = os.path.join(
        DATA_PATH, 'example_two_polarization_firstcal_results.hdf5')
    np.random.seed(0)
    firstcal_twopol.FC.run_metrics(std_cut=.5)

    expected = metrics_io.load_metric_file(known_file)
    expected.pop('history', None)
    expected.pop('version', None)

    # Full file paths are stored in the files (perhaps for record keeping);
    # strip them from both sides so the comparison is machine independent.
    computed = firstcal_twopol.FC.metrics
    for metrics in (expected, computed):
        for key in metrics:
            for drop in ('fc_filename', 'fc_filestem', 'version'):
                metrics[key].pop(drop, None)

    assert qmtest.recursive_compare_dicts(computed, expected)