def test_save_qc_results(mock_es):
    context = FakeContext()
    context.post_to_elasticsearch = True
    context.elasticsearch_url = '/'
    stage = FakeStage(context)
    qc.save_qc_results(stage.runtime_context, {}, FakeImage())
    assert mock_es.called

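# A minimal sketch (assumption) of the decorator pattern that supplies the mock_* arguments
# used throughout these tests; the patch target below is a hypothetical placeholder, not the
# package's actual module path.
from unittest import mock


@mock.patch('path.to.elasticsearch_client')  # hypothetical patch target
def sketch_save_qc_results_with_patched_client(mock_es):
    # mock_es stands in for the patched object for the duration of the test, so assertions
    # such as `assert mock_es.called` inspect the calls recorded by the mock.
    ...
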
def test_flags_bad_if_no_master_calibration(mock_cal):
    mock_cal.return_value = None
    context = FakeContext()
    context.FRAME_CLASS = FakeDarkImage
    comparer = DarkComparer(context)
    image = comparer.do_stage(FakeDarkImage(30.0))
    assert image.is_bad is True

def test_header_cal_type_dark(mock_frame):
    context = FakeContext()
    context.FRAME_CLASS = FakeDarkImage
    maker = DarkMaker(context)
    images = maker.do_stage([FakeDarkImage() for x in range(6)])
    assert images[0].header['OBSTYPE'].upper() == 'DARK'

def test_header_cal_type_dark(mock_frame, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    context = FakeContext()
    context.FRAME_CLASS = FakeDarkImage
    maker = DarkMaker(context)
    images = maker.do_stage([FakeDarkImage() for x in range(6)])
    assert images[0].header['OBSTYPE'].upper() == 'DARK'

def make_context_with_realistic_master_dark(dark_pattern, nx=101, ny=103, dark_level=30.0,
                                             dark_exptime=900.0, readnoise=10.0):
    n_stacked_images = 100
    data = dark_level + dark_pattern * dark_exptime
    dark_noise = np.random.poisson(data) + np.random.normal(0.0, readnoise, size=(ny, nx))
    dark_noise /= np.sqrt(n_stacked_images)
    data += dark_noise
    data /= dark_exptime
    context = FakeContext(frame_class=lambda *args, **kwargs: FakeDarkImage(data=data))
    context.dark_pattern = dark_pattern
    return context

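# Side note (not part of the original tests): the division by sqrt(n_stacked_images) above
# reflects the usual assumption that averaging N independent frames reduces per-pixel noise
# by a factor of sqrt(N); a quick numpy check of that scaling:
def demo_stack_noise_scaling(n_stacked_images=100, readnoise=10.0, npix=100000):
    frames = np.random.normal(0.0, readnoise, size=(n_stacked_images, npix))
    stacked_noise = np.std(frames.mean(axis=0))
    # The noise in the stacked frame should be close to readnoise / sqrt(N).
    assert np.abs(stacked_noise - readnoise / np.sqrt(n_stacked_images)) < 0.1
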
def test_header_cal_type_flat(mock_instrument_info, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    mock_instrument_info.return_value = None, None, None
    fake_context = FakeContext()
    fake_context.db_address = ''
    maker = FlatMaker(fake_context)
    master_flat = maker.do_stage([FakeFlatImage() for x in range(6)])[0]
    header = master_flat.header
    assert header['OBSTYPE'].upper() == 'SKYFLAT'

def test_bias_level_is_average_of_inputs(mock_instrument_info):
    nimages = 20
    bias_levels = np.arange(nimages, dtype=float)
    images = [FakeBiasImage(bias_level=i) for i in bias_levels]
    mock_instrument_info.return_value = None, None, None
    fake_context = FakeContext()
    fake_context.db_address = ''
    maker = BiasMaker(fake_context)
    master_bias = maker.do_stage(images)[0]
    header = master_bias.header
    assert header['BIASLVL'] == np.mean(bias_levels)

def test_bias_level_is_average_of_inputs(mock_instrument_info, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    nimages = 20
    bias_levels = np.arange(nimages, dtype=float)
    images = [FakeBiasImage(bias_level=i) for i in bias_levels]
    mock_instrument_info.return_value = None, None, None
    fake_context = FakeContext()
    fake_context.db_address = ''
    maker = BiasMaker(fake_context)
    master_bias = maker.do_stage(images)[0]
    header = master_bias.header
    assert header['BIASLVL'] == np.mean(bias_levels)

def test_header_master_bias_level_returns_2(mock_image):
    maker = BiasMaker(FakeContext())
    maker.do_stage([FakeBiasImage(image_multiplier=2.0) for x in range(6)])
    args, kwargs = mock_image.call_args
    header = kwargs['header']
    assert header['BIASLVL'] == 2.0

def test_header_cal_type_bias(mock_image):
    maker = BiasMaker(FakeContext())
    maker.do_stage([FakeBiasImage() for x in range(6)])
    args, kwargs = mock_image.call_args
    header = kwargs['header']
    assert header['OBSTYPE'].upper() == 'BIAS'

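# Standalone illustration (not a banzai test) of the mock.call_args pattern used above:
# call_args holds the positional and keyword arguments of the most recent call to the mock.
def demo_call_args_unpacking():
    from unittest import mock
    fake_frame_class = mock.Mock()
    fake_frame_class(data=[1, 2, 3], header={'OBSTYPE': 'BIAS'})
    args, kwargs = fake_frame_class.call_args  # args is empty here; kwargs holds the keywords
    assert kwargs['header']['OBSTYPE'] == 'BIAS'
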
def make_context_with_master_bias(bias_level=0.0, readnoise=10.0, nx=101, ny=103):
    fake_master_bias = FakeBiasImage(bias_level=bias_level,
                                     data=np.random.normal(0.0, readnoise, size=(ny, nx)),
                                     nx=nx, ny=ny)
    return FakeContext(frame_class=lambda *args, **kwargs: fake_master_bias)

def test_makes_a_sensible_master_dark(mock_frame, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    nimages = 20
    images = [FakeDarkImage() for x in range(nimages)]
    for i, image in enumerate(images):
        image.data = np.ones((image.ny, image.nx)) * i
    expected_master_dark = stats.sigma_clipped_mean(np.arange(nimages), 3.0)
    maker = DarkMaker(FakeContext(frame_class=FakeDarkImage))
    stacked_images = maker.do_stage(images)
    assert (stacked_images[0].data == expected_master_dark).all()

def test_makes_a_sensible_master_bias(mock_frame, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    nimages = 20
    expected_readnoise = 15.0
    images = [FakeBiasImage() for x in range(nimages)]
    for image in images:
        image.data = np.random.normal(loc=0.0, scale=expected_readnoise,
                                      size=(image.ny, image.nx))
    maker = BiasMaker(FakeContext(frame_class=FakeBiasImage))
    stacked_images = maker.do_stage(images)
    master_bias = stacked_images[0].data
    assert np.abs(np.mean(master_bias)) < 0.1
    actual_readnoise = np.std(master_bias)
    assert np.abs(actual_readnoise - expected_readnoise / (nimages ** 0.5)) < 0.2

def test_makes_a_sensible_master_flat(mock_frame, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    nimages = 50
    flat_level = 10000.0
    nx = 101
    ny = 103
    master_flat_variation = 0.05
    images = [FakeFlatImage(flat_level, nx=nx, ny=ny) for _ in range(nimages)]
    flat_pattern = np.random.normal(1.0, master_flat_variation, size=(ny, nx))
    for image in images:
        image.data = flat_pattern + np.random.normal(0.0, 0.02, size=(ny, nx))
    maker = FlatMaker(FakeContext(frame_class=FakeFlatImage))
    stacked_images = maker.do_stage(images)
    np.testing.assert_allclose(stacked_images[0].data, flat_pattern, atol=0.01, rtol=0.01)

def test_makes_a_sensible_master_bias(mock_images):
    nimages = 20
    expected_bias = 1183.0
    expected_readnoise = 15.0
    images = [FakeBiasImage() for x in range(nimages)]
    for image in images:
        image.data = np.random.normal(loc=expected_bias, scale=expected_readnoise,
                                      size=(image.ny, image.nx))
    maker = BiasMaker(FakeContext())
    maker.do_stage(images)
    args, kwargs = mock_images.call_args
    master_bias = kwargs['data']
    assert np.abs(np.mean(master_bias)) < 0.1
    actual_bias = float(kwargs['header']['BIASLVL'])
    assert np.abs(actual_bias - expected_bias) < 0.1
    actual_readnoise = np.std(master_bias)
    assert np.abs(actual_readnoise - expected_readnoise / (nimages ** 0.5)) < 0.2

def test_image_creates_and_loads_tables_correctly():
    """
    Tests that add_data_tables_to_hdu_list and regenerate_data_table_from_fits_hdu_list
    round-trip astropy tables through fits.HDUList objects correctly, both for tables with
    single-element entries and for tables with columns where each element is a list.
    """
    test_image = Image(FakeContext(), filename=None)
    table_name = 'test'
    a = np.arange(3)
    array_1 = [a, a]
    array_2 = [a, np.vstack((a, a)).T]
    for test_array in [array_1, array_2]:
        test_table = Table(test_array, names=('1', '2'), meta={'name': table_name})
        test_table['1'].description = 'test_description'
        test_table['1'].unit = 'pixel'
        test_image.data_tables[table_name] = DataTable(data_table=test_table, name=table_name)
        hdu_list = []
        hdu_list = test_image._add_data_tables_to_hdu_list(hdu_list)
        fits_hdu_list = fits.HDUList(hdu_list)
        test_table_dict = regenerate_data_table_from_fits_hdu_list(
            fits_hdu_list, table_extension_name=table_name)
        test_table_recreated = test_table_dict[table_name]
        assert (test_table_recreated == test_table).all()

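# A minimal astropy-only illustration (assumption: this mirrors the idea the banzai helpers
# wrap) of writing an astropy Table into a FITS HDU list and reading it back.
def demo_table_fits_roundtrip():
    original = Table([np.arange(3), np.arange(3)], names=('1', '2'))
    hdu = fits.table_to_hdu(original)
    hdu.name = 'TEST'
    hdu_list = fits.HDUList([fits.PrimaryHDU(), hdu])
    recreated = Table(hdu_list['TEST'].data)
    assert (recreated['1'] == original['1']).all()
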
def test_group_by_attributes():
    maker = FlatMaker(FakeContext())
    assert maker.group_by_attributes() == ['configuration_mode', 'ccdsum', 'filter']

def test_raises_an_exception_if_ccdsums_are_different(mock_images):
    throws_inhomogeneous_set_exception(BiasMaker, FakeContext(), 'ccdsum', '1 1')

def make_context_with_master_flat(flat_level=1.0, master_flat_variation=0.05, nx=101, ny=103):
    fake_master_flat = FakeFlatImage(data=np.random.normal(flat_level, master_flat_variation,
                                                           size=(ny, nx)),
                                     nx=nx, ny=ny)
    return FakeContext(frame_class=lambda *args, **kwargs: fake_master_flat)

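# A hedged usage sketch (assumption): judging from the dark-comparer tests above, the comparer
# stages appear to load their master frame through the context's frame class, so a test could
# pair the helper above with FlatComparer roughly like this.
def sketch_flat_comparer_with_fake_master():
    context = make_context_with_master_flat(flat_level=1.0, master_flat_variation=0.05)
    comparer = FlatComparer(context)
    image = comparer.do_stage(FakeFlatImage(1.0))
    return image
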
def test_returns_null_if_filters_are_different(mock_cal, mock_frame):
    mock_cal.return_value = 'test.fits'
    handles_inhomogeneous_set(FlatComparer, FakeContext(), 'filter', 'w')

def test_returns_null_if_ny_are_different():
    handles_inhomogeneous_set(FlatMaker, FakeContext(), 'ny', 107, calibration_maker=True)

def test_null_input_image():
    comparer = FlatComparer(FakeContext())
    image = comparer.run(None)
    assert image is None

def test_min_images():
    bias_maker = BiasMaker(FakeContext())
    processed_images = bias_maker.do_stage([])
    assert len(processed_images) == 0

def test_raises_an_exception_if_epochs_are_different(mock_images):
    throws_inhomogeneous_set_exception(BiasMaker, FakeContext(), 'epoch', '20160102')

def test_raises_an_exception_if_ny_are_different(mock_images):
    throws_inhomogeneous_set_exception(BiasMaker, FakeContext(), 'ny', 107)

def test_min_images():
    dark_maker = DarkMaker(FakeContext())
    processed_images = dark_maker.do_stage([])
    assert len(processed_images) == 0

def test_returns_null_if_confmodes_are_different(mock_frame):
    handles_inhomogeneous_set(FlatMaker, FakeContext(), 'configuration_mode',
                              'central_2k_2x2', calibration_maker=True)

def test_master_selection_criteria():
    comparer = FlatComparer(FakeContext())
    assert comparer.master_selection_criteria == ['configuration_mode', 'ccdsum', 'filter']

def test_returns_null_if_filters_are_different():
    handles_inhomogeneous_set(FlatMaker, FakeContext(), 'filter', 'w', calibration_maker=True)

def test_returns_null_if_configuration_modes_are_different(mock_frame, mock_cal):
    mock_cal.return_value = 'test.fits'
    handles_inhomogeneous_set(FlatComparer, FakeContext(), 'configuration_mode',
                              'central_2k_2x2')

def test_min_images():
    flat_maker = FlatMaker(FakeContext())
    processed_images = flat_maker.do_stage([])
    assert len(processed_images) == 0

def test_returns_null_if_nx_are_different():
    handles_inhomogeneous_set(BiasMaker, FakeContext(), 'nx', 105, calibration_maker=True)