def test_run_msaflagging(self, caplog):
    """Test msa flagging operation"""
    # Fetch pipeline configs, the input exposure, and the association.
    collect_pipeline_cfgs('cfgs')
    self.get_data(*self.test_dir, 'jw95065006001_0_msa_twoslit.fits')
    asn_path = self.get_data(*self.test_dir, 'mos_udf_g235m_twoslit_spec2_asn.json')
    with open(asn_path) as fp:
        asn = load_asn(fp)

    # Pull down every member exposure referenced by the association.
    for product in asn['products']:
        for member in product['members']:
            self.get_data(*self.test_dir, 'level2a_twoslit', member['expname'])

    # Run spec2 with the msa_flagging step explicitly enabled.
    Step.from_cmdline([
        op.join('cfgs', 'calwebb_spec2.cfg'),
        asn_path,
        '--steps.msa_flagging.skip=false',
    ])

    # The step must have run to completion and written each product.
    assert 'Step msa_flagging running with args' in caplog.text
    assert 'Step msa_flagging done' in caplog.text
    for product in asn['products']:
        assert op.isfile(product['name'] + '_cal.fits')
def test_msa_missing_skip(self, caplog):
    """Test MSA missing failure"""
    input_file = self.get_data(
        *self.test_dir, 'level2a_twoslit',
        'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'
    )
    collect_pipeline_cfgs('cfgs')

    # With assign_wcs skipped the exposure cannot be processed further;
    # the pipeline is expected to abort the rest of the steps for it.
    Step.from_cmdline([
        op.join('cfgs', 'calwebb_spec2.cfg'),
        input_file,
        '--steps.assign_wcs.skip=true',
    ])

    assert 'Aborting remaining processing for this exposure.' in caplog.text
def test_msa_missing_nofail(self, caplog):
    """Test MSA missing failure"""
    input_file = self.get_data(
        *self.test_dir, 'level2a_twoslit',
        'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'
    )
    collect_pipeline_cfgs('cfgs')

    # fail_on_exception=false: the missing MSA metadata file should be
    # logged rather than raised.
    Step.from_cmdline([
        op.join('cfgs', 'calwebb_spec2.cfg'),
        input_file,
        '--fail_on_exception=false',
    ])

    assert 'Missing MSA meta (MSAMETFL) file' in caplog.text
def test_msa_missing_skip(self, caplog):
    """Test MSA missing failure"""
    # NOTE(review): this has the same name as an earlier
    # test_msa_missing_skip in this SOURCE — if both live in the same
    # class, only the later definition is collected by pytest; confirm
    # whether these belong to different files.
    input_file = self.get_data(
        *self.test_dir, 'level2a_twoslit',
        'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'
    )
    collect_pipeline_cfgs('cfgs')

    # Skipping assign_wcs should cause the pipeline to abort the
    # remaining processing for this exposure.
    Step.from_cmdline([
        op.join('cfgs', 'calwebb_spec2.cfg'),
        input_file,
        '--steps.assign_wcs.skip=true',
    ])

    assert 'Aborting remaining processing for this exposure.' in caplog.text
def test_msa_missing_nofail(self, caplog):
    """Test MSA missing failure"""
    # NOTE(review): same name as an earlier test_msa_missing_nofail in
    # this SOURCE but a different expected log message — if both are in
    # one class only the later one is collected; confirm file boundaries.
    input_file = self.get_data(
        *self.test_dir, 'level2a_twoslit',
        'F170LP-G235M_MOS_observation-6-c0e0_001_DN_NRS1_mod.fits'
    )
    collect_pipeline_cfgs('cfgs')

    # fail_on_exception=false: the unreadable MSA file should be logged
    # rather than raised.
    Step.from_cmdline([
        op.join('cfgs', 'calwebb_spec2.cfg'),
        input_file,
        '--fail_on_exception=false',
    ])

    assert 'Unable to open MSA FITS file (MSAMETFL)' in caplog.text
def test_step_from_asdf():
    """Test initializing step completely from config"""
    config_file = t_path(Path('steps') / 'jwst_generic_pars-makeliststep_0001.asdf')
    step = Step.from_config_file(config_file)

    # The ASDF parameter file fully determines the step class and name.
    assert isinstance(step, MakeListStep)
    assert step.name == 'make_list'

    assert step.run() == DEFAULT_RESULT
def test_step_from_asdf_noname():
    """Test initializing step completely from config without a name specified"""
    root = 'jwst_generic_pars-makeliststep_0002'
    step = Step.from_config_file(t_path(Path('steps') / (root + '.asdf')))

    # With no explicit name in the file, the file's root name is used.
    assert isinstance(step, MakeListStep)
    assert step.name == root

    assert step.run() == DEFAULT_RESULT
def test_from_command_line_override():
    """Test creating Step from command line using ASDF"""
    config_file = t_path(Path('steps') / 'jwst_generic_pars-makeliststep_0001.asdf')
    step = Step.from_cmdline([config_file, '--par1=0.'])

    # par1 comes from the command line override, par2 from the ASDF file.
    assert isinstance(step, MakeListStep)
    assert step.par1 == 0.
    assert step.par2 == 'Yes, a string'

    assert step.run() == [0., DEFAULT_PAR2, False]
def test_asdf_roundtrip_pipeline(_jail):
    """Save a Pipeline pars and re-instantiate with the save parameters"""
    par_path = 'mkp_pars.asdf'

    # First run: save the effective parameters out to an ASDF file.
    Step.from_cmdline([
        'jwst.stpipe.tests.steps.MakeListPipeline',
        'a.fits', 'b',
        '--steps.make_list.par1', '10.',
        '--steps.make_list.par2', 'par2',
        '--save-parameters', par_path,
    ])

    # Second run from the saved parameter file. The primary condition is
    # simply that from_cmdline succeeds.
    step = Step.from_cmdline([par_path, 'a.fits', 'b'])

    # Secondary condition: the required parameter par2 was restored.
    assert step.make_list.par2 == 'par2'
def test_asn_naming(self):
    """Test a full run"""
    # Gather configs, the association, and every member exposure.
    collect_pipeline_cfgs('cfgs')
    asn_path = self.get_data(self.test_dir, 'wfs_3sets_asn.json')
    with open(asn_path) as fh:
        asn = load_asn(fh)
    for product in asn['products']:
        for member in product['members']:
            self.get_data(self.test_dir, member['expname'])
    input_files = glob('*')

    # Run the pipeline.
    Step.from_cmdline([op.join('cfgs', 'calwebb_wfs-image3.cfg'), asn_path])

    # Everything new on disk must be exactly the expected products.
    output_files = glob('*')
    for input_file in input_files:
        output_files.remove(input_file)
    print('output_files = {}'.format(output_files))

    for product in asn['products']:
        prod_name = format_product(product['name'], suffix='wfscmb') + '.fits'
        assert prod_name in output_files
        output_files.remove(prod_name)

    # There should be no more files
    assert len(output_files) == 0
def test_nrc_wfss_background(filters, pupils, detectors, make_wfss_datamodel):
    """Test background subtraction for NIRCAM WFSS modes."""
    data = make_wfss_datamodel
    data.meta.instrument.filter = filters
    data.meta.instrument.pupil = pupils
    data.meta.instrument.detector = detectors
    data.meta.instrument.channel = 'LONG'
    data.meta.instrument.name = 'NIRCAM'
    data.meta.exposure.type = 'NRC_WFSS'

    # The module letter follows directly from the detector name.
    if data.meta.instrument.detector == 'NRCALONG':
        data.meta.instrument.module = 'A'
    elif data.meta.instrument.detector == 'NRCBLONG':
        data.meta.instrument.module = 'B'

    wcs_corrected = AssignWcsStep.call(data)

    # Get References
    wavelenrange = Step().get_reference_file(wcs_corrected, "wavelengthrange")
    bkg_file = Step().get_reference_file(wcs_corrected, 'wfssbkg')

    mask = mask_from_source_cat(wcs_corrected, wavelenrange)

    with datamodels.open(bkg_file) as bkg_ref:
        bkg_ref = no_NaN(bkg_ref)

        # calculate backgrounds: compare the step's robust mean against a
        # sigma-clipped mean, for both the science data and the reference.
        pipeline_data_mean = robust_mean(wcs_corrected.data[mask])
        test_data_mean, _, _ = sigma_clipped_stats(wcs_corrected.data, sigma=2)
        pipeline_reference_mean = robust_mean(bkg_ref.data[mask])
        test_reference_mean, _, _ = sigma_clipped_stats(bkg_ref.data, sigma=2)

        assert np.isclose([pipeline_data_mean], [test_data_mean], rtol=1e-3)
        assert np.isclose([pipeline_reference_mean], [test_reference_mean], rtol=1e-1)
def test_create_slitlets():
    """Test that slitlets are Slit type and have all the necessary fields"""
    # Minimal model carrying just the metadata needed for the CRDS
    # reference-file lookup.
    dm = ImageModel()
    dm.meta.instrument.name = 'NIRSPEC'
    dm.meta.observation.date = '2016-09-05'
    dm.meta.observation.time = '8:59:37'

    msa_oper = Step().get_reference_file(dm, 'msaoper')
    result = create_slitlets(dm, msa_oper)

    slit_fields = (
        'name', 'shutter_id', 'dither_position', 'xcen',
        'ycen', 'ymin', 'ymax', 'quadrant', 'source_id',
        'shutter_state', 'source_name', 'source_alias',
        'stellarity', 'source_xpos', 'source_ypos',
    )

    for slit in result:
        # Test the returned data type and fields. Use identity (`is`)
        # for the exact-type check rather than `==` (flake8 E721).
        assert type(slit) is Slit
        assert slit._fields == slit_fields
def test_get_failed_open_shutters():
    """test that failed open shutters are returned from reference file"""
    # Set up data model to retrieve reference file
    dm = ImageModel()
    dm.meta.instrument.name = 'NIRSPEC'
    dm.meta.observation.date = '2016-09-05'
    dm.meta.observation.time = '8:59:37'

    # Get reference file and return all failed open shutters
    msa_oper = Step().get_reference_file(dm, 'msaoper')
    result = get_failed_open_shutters(msa_oper)

    # get_failed_open_shutters returns 3 flaggable states:
    # state, Internal state, and TA state.
    for shutter in result:
        states = (shutter['state'], shutter['Internal state'], shutter['TA state'])
        assert 'open' in states