def pipeline_test_data(self):
    """Pipeline the test data up to just before the jump step, i.e. through refpix."""
    Detector1Pipeline.call(self.sim_file,
                           steps={'ipc': {'skip': True},
                                  'rscd': {'skip': True},
                                  'refpix': {'save_results': True,
                                             'output_dir': self.output_dir},
                                  'jump': {'skip': True},
                                  'ramp_fit': {'skip': True}})

    # the saved refpix product is the input to the jump step, so rename it
    self.jump_file = os.path.join(self.output_dir, 'step_refpix.fits')
    better_name = os.path.join(self.output_dir, 'jump_file.fits')
    shutil.move(self.jump_file, better_name)
    self.jump_file = better_name
def get_pipelines(exp_type):
    if 'DARK' in exp_type:
        return [calwebb_dark.DarkPipeline()]
    elif 'FLAT' in exp_type:
        return [Detector1Pipeline()]
    elif exp_type.lower() in IMAGING:
        return [Detector1Pipeline(), calwebb_image2.Image2Pipeline()]
    else:
        return [Detector1Pipeline(), calwebb_spec2.Spec2Pipeline()]
def test_niriss_detector1(self):
    """
    Regression test of calwebb_detector1 pipeline performed on NIRISS data.
    """
    input_file = self.get_data(self.test_dir,
                               'jw00034001001_01101_00001_NIRISS_uncal.fits')

    step = Detector1Pipeline()
    step.save_calibrated_ramp = True
    step.ipc.skip = True
    step.persistence.skip = True
    step.refpix.odd_even_columns = True
    step.refpix.use_side_ref_pixels = True
    step.refpix.side_smoothing_length = 11
    step.refpix.side_gain = 1.0
    step.refpix.odd_even_rows = True
    step.jump.rejection_threshold = 250.0
    step.ramp_fit.save_opt = False
    step.ramp_fit.suffix = 'ramp'
    step.output_file = 'jw00034001001_01101_00001_NIRISS_rate.fits'

    step.run(input_file)

    outputs = [('jw00034001001_01101_00001_NIRISS_ramp.fits',
                'jw00034001001_01101_00001_NIRISS_ramp_ref.fits'),
               ('jw00034001001_01101_00001_NIRISS_rate.fits',
                'jw00034001001_01101_00001_NIRISS_rate_ref.fits')]
    self.compare_outputs(outputs)
def generate_corrected_ramp(pipeline_ready_file, dark_override=None,
                            linearity_override=None, saturation_override=None,
                            rscd_override=None, mask_override=None,
                            skip_dark=False, output_path=None):
    """Run Detector1Pipeline, optionally overriding individual reference files."""
    mypipeline = Detector1Pipeline()
    mypipeline.save_calibrated_ramp = True
    mypipeline.save_results = True

    # apply any requested reference-file overrides
    if dark_override:
        mypipeline.dark_current.override_dark = dark_override
    if linearity_override:
        mypipeline.linearity.override_linearity = linearity_override
    if saturation_override:
        mypipeline.saturation.override_saturation = saturation_override
    if rscd_override:
        mypipeline.rscd.override_rscd = rscd_override
    if mask_override:
        mypipeline.dq_init.override_mask = mask_override

    if skip_dark:
        mypipeline.dark_current.skip = True
    if output_path:
        mypipeline.output_dir = output_path

    result = mypipeline.run(pipeline_ready_file)
    return result
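# Minimal usage sketch for generate_corrected_ramp; the file and directory
# names below are hypothetical placeholders, not real products.
result = generate_corrected_ramp('example_mirimage_uncal.fits',
                                 dark_override='my_dark_ref.fits',
                                 skip_dark=False,
                                 output_path='./pipeout')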
def test_fgs_detector1_1(_bigdata):
    """
    Regression test of calwebb_detector1 pipeline performed on
    FGS imaging mode data.
    """
    pipe = Detector1Pipeline()
    pipe.ipc.skip = True
    pipe.refpix.odd_even_columns = True
    pipe.refpix.use_side_ref_pixels = True
    pipe.refpix.side_smoothing_length = 11
    pipe.refpix.side_gain = 1.0
    pipe.refpix.odd_even_rows = True
    pipe.jump.rejection_threshold = 250.0
    pipe.persistence.skip = True
    pipe.ramp_fit.save_opt = False
    pipe.save_calibrated_ramp = True
    pipe.output_file = 'jw86500007001_02101_00001_GUIDER2_rate.fits'

    pipe.run(_bigdata + '/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_uncal.fits')

    # Compare calibrated ramp product
    n_cr = 'jw86500007001_02101_00001_GUIDER2_ramp.fits'
    h = fits.open(n_cr)
    n_ref = _bigdata + '/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_ramp_ref.fits'
    href = fits.open(n_ref)
    newh = fits.HDUList([h['primary'], h['sci'], h['err'], h['groupdq'], h['pixeldq']])
    newhref = fits.HDUList([href['primary'], href['sci'], href['err'], href['groupdq'], href['pixeldq']])
    result = fits.diff.FITSDiff(newh, newhref,
                                ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
                                rtol=0.00001)
    assert result.identical, result.report()

    # Compare multi-integration countrate image product
    n_int = 'jw86500007001_02101_00001_GUIDER2_rateints.fits'
    h = fits.open(n_int)
    n_ref = _bigdata + '/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_rateints_ref.fits'
    href = fits.open(n_ref)
    newh = fits.HDUList([h['primary'], h['sci'], h['err'], h['dq']])
    newhref = fits.HDUList([href['primary'], href['sci'], href['err'], href['dq']])
    result = fits.diff.FITSDiff(newh, newhref,
                                ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
                                rtol=0.00001)
    assert result.identical, result.report()

    # Compare countrate image product
    n_rate = 'jw86500007001_02101_00001_GUIDER2_rate.fits'
    h = fits.open(n_rate)
    n_ref = _bigdata + '/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_rate_ref.fits'
    href = fits.open(n_ref)
    newh = fits.HDUList([h['primary'], h['sci'], h['err'], h['dq']])
    newhref = fits.HDUList([href['primary'], href['sci'], href['err'], href['dq']])
    result = fits.diff.FITSDiff(newh, newhref,
                                ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
                                rtol=0.00001)
    assert result.identical, result.report()
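# The three comparison blocks above repeat one pattern; a minimal sketch of a
# helper that could replace them (the helper name is hypothetical, the
# keywords and tolerance are copied from the test above):
from astropy.io import fits

def compare_product(new_name, ref_name, extensions):
    # build trimmed HDU lists containing only the extensions of interest
    h, href = fits.open(new_name), fits.open(ref_name)
    newh = fits.HDUList([h[ext] for ext in extensions])
    newhref = fits.HDUList([href[ext] for ext in extensions])
    result = fits.diff.FITSDiff(
        newh, newhref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=1e-5)
    assert result.identical, result.report()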
def get_pipelines(exp_type):
    """Select which pipeline(s) to use based on exp_type.

    Parameters
    ----------
    exp_type: str
        JWST exposure type

    Returns
    -------
    pipeline: list
        Pipeline(s) to use for calibrating files.
    """
    if 'DARK' in exp_type:
        pipeline = [calwebb_dark.DarkPipeline()]
    elif 'FLAT' in exp_type:
        pipeline = [Detector1Pipeline()]
    elif exp_type.lower() in IMAGING:
        pipeline = [Detector1Pipeline(), calwebb_image2.Image2Pipeline()]
    else:
        pipeline = [Detector1Pipeline(), calwebb_spec2.Spec2Pipeline()]

    return pipeline
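# Usage sketch (illustrative only): IMAGING is assumed to be a module-level
# collection of lower-case imaging exposure types defined elsewhere, with
# 'mir_image' among them.
for pipeline in get_pipelines('MIR_IMAGE'):
    print(type(pipeline).__name__)   # Detector1Pipeline, then Image2Pipeline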
def run_pipeline(list_of_jwst_files, jpl_det, path_reffiles):
    """
    Run the JWST pipeline on JPL data, using the appropriate reference files.
    We need to know jpl_det and the FR value to set up the reference files.
    """
    for file in list_of_jwst_files:
        # read the fast-reset (FR) value from the primary header
        hdulist = fits.open(file)
        header = hdulist[0].header
        fr = header['FRMRSTS']
        hdulist.close()

        # Run 10 FPM 101
        if jpl_det == '101':
            bad_pixel_mask = 'MIRI_JPL_MASK_04.02.00.fits'
            pixel_sat = 'MIRI_JPL_RUN10_FPM101_SATURATION_MIN_8B.00.02.fits'
            if fr == 0:
                lin_cor = 'MIRI_JPL_RUN10_FPM101_FR0_PIXEL_JPL_LINEARITY_8B.00.02.fits'
                reset_cor = 'MIRI_JPL_RUN9_FPM101_FAST_RESET_RESIDUAL_8B.00.01.fits'
            elif fr == 1:
                lin_cor = 'MIRI_JPL_RUN10_FPM101_FR1_PIXEL_JPL_LINEARITY_8B.00.02.fits'
                reset_cor = 'MIRI_JPL_RUN10_FPM101_FR1_FAST_RESET_RESIDUAL_8B.00.01.fits'
            else:
                raise ValueError('Invalid FR value: {}'.format(fr))
        # Run 10 SCA 106
        else:
            bad_pixel_mask = 'MIRI_JPL_RUN6_MASK_07.02.00.fits'
            pixel_sat = 'MIRI_JPL_RUN10_SCA106_SATURATION_MIN_8B.00.02.fits'
            if fr == 0:
                lin_cor = 'MIRI_JPL_RUN10_SCA106_FR0_JPL_LINEARITY_8B.00.00.fits'
                reset_cor = 'MIRI_JPL_RUN8_SCA106_FAST_RESET_RESIDUAL_07.00.01.fits'
            elif fr == 1:
                lin_cor = 'MIRI_JPL_RUN10_SCA106_FR1_JPL_LINEARITY_8B.00.00.fits'
                reset_cor = 'MIRI_JPL_RUN10_SCA106_FR1_FAST_RESET_RESIDUAL_8B.00.01.fits'
            else:
                raise ValueError('Invalid FR value: {}'.format(fr))

        pipe = Detector1Pipeline()
        pipe.dq_init.override_mask = path_reffiles + '/' + bad_pixel_mask
        pipe.saturation.override_saturation = path_reffiles + '/' + pixel_sat
        pipe.linearity.override_linearity = path_reffiles + '/' + lin_cor
        pipe.linearity.save_results = True
        pipe.reset.override_reset = path_reffiles + '/' + reset_cor
        pipe.reset.save_results = True
        pipe.refpix.skip = True
        pipe.dark_current.skip = True
        pipe.rscd.skip = True
        pipe.ipc.skip = True
        pipe.save_results = True
        result = pipe.run(file)
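# Hypothetical invocation of run_pipeline; the data directory and the
# reference-file path are placeholders for wherever the JPL test data live.
import glob
jwst_files = sorted(glob.glob('/data/jpl_run10/*_uncal.fits'))
run_pipeline(jwst_files, jpl_det='101', path_reffiles='/data/jpl_reffiles')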
def test_gain_scale_naming(self):
    """
    Regression test for gain_scale naming when results are requested to
    be saved for the gain_scale step.
    """
    expfile = 'jw00001001001_01101_00001_MIRIMAGE'
    input_file = self.get_data(self.test_dir, expfile + '_uncal.fits')
    input_name = os.path.basename(input_file)

    step = Detector1Pipeline()
    step.group_scale.skip = True
    step.dq_init.skip = True
    step.saturation.skip = True
    step.ipc.skip = True
    step.superbias.skip = True
    step.refpix.skip = True
    step.rscd.skip = True
    step.firstframe.skip = True
    step.lastframe.skip = True
    step.linearity.skip = True
    step.dark_current.skip = True
    step.persistence.skip = True
    step.jump.skip = True
    step.ramp_fit.skip = False
    step.gain_scale.skip = False
    step.gain_scale.save_results = True

    step.run(input_file)

    files = glob('*.fits')
    if input_name in files:
        files.remove(input_name)

    output_file = expfile + '_gain_scale.fits'
    assert output_file in files
    files.remove(output_file)

    output_file = expfile + '_gain_scaleints.fits'
    assert output_file in files
    files.remove(output_file)

    assert not len(files)
def test_detector1pipeline4(self):
    """
    Regression test of calwebb_detector1 pipeline performed on NIRSpec data.
    """
    input_file = self.get_data(self.test_dir,
                               'jw84600007001_02101_00001_nrs1_uncal.fits')

    step = Detector1Pipeline()
    step.save_calibrated_ramp = True
    step.ipc.skip = True
    step.persistence.skip = True
    step.jump.rejection_threshold = 4.0
    step.ramp_fit.save_opt = False
    step.output_file = 'jw84600007001_02101_00001_nrs1_rate.fits'

    step.run(input_file)

    outputs = [('jw84600007001_02101_00001_nrs1_ramp.fits',
                'jw84600007001_02101_00001_nrs1_ramp_ref.fits'),
               ('jw84600007001_02101_00001_nrs1_rate.fits',
                'jw84600007001_02101_00001_nrs1_rate_ref.fits')]
    self.compare_outputs(outputs)
def calibrate(self, configdir=None, outdir=None, **kwargs):
    """
    Pipeline process the file

    Parameters
    ----------
    configdir: str
        The directory containing the configuration files
    outdir: str
        The directory to put the calibrated files into
    """
    # Get config directory
    if configdir is None:
        configdir = resource_filename('specialsoss', 'files')

    # Get output directory
    if outdir is None:
        outdir = os.path.dirname(self.file)

    # Output path for the input file
    basename = os.path.basename(self.file)
    file = os.path.join(outdir, basename)

    # Dict for new files
    new_files = {}

    if self.ext == 'uncal':
        # Create Detector1Pipeline instance
        cfg1_file = os.path.join(configdir, 'calwebb_tso1.cfg')
        from jwst.pipeline import Detector1Pipeline
        tso1 = Detector1Pipeline.call(self.file, save_results=True,
                                      config_file=cfg1_file, output_dir=outdir)

        # Calibrated files ('file' already includes outdir, so no second join)
        new_files['ramp'] = file.replace('_uncal.fits', '_ramp.fits')
        new_files['rate'] = file.replace('_uncal.fits', '_rate.fits')
        new_files['rateints'] = file.replace('_uncal.fits', '_rateints.fits')

    elif self.ext in ['rate', 'rateints']:
        # SPEC2 Pipeline
        cfg2_file = os.path.join(configdir, 'calwebb_tso-spec2.cfg')
        from jwst.pipeline import Spec2Pipeline
        tso2 = Spec2Pipeline(save_results=True, config_file=cfg2_file,
                             output_dir=outdir)

        # Configure steps
        tso2.cube_build.skip = True
        tso2.extract_2d.skip = True
        tso2.bkg_subtract.skip = True
        tso2.msa_flagging.skip = True
        tso2.barshadow.skip = True
        tso2.extract_1d.save_results = True

        # Run the pipeline
        tso2.run(self.file)

        # Calibrated files
        new_files['calints'] = file.replace('_rateints.fits', '_calints.fits')
        new_files['x1dints'] = file.replace('_rateints.fits', '_x1dints.fits')

    else:
        print("Not sure how to calibrate a '{}' file right now.".format(self.ext))

    return new_files
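# Hypothetical use of calibrate(): 'obs' stands for an instance of the
# (unshown) class that owns the method; the dict keys follow the body above.
new_files = obs.calibrate(outdir='/tmp/soss_cal')
print(new_files.get('rateints'))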
def pipeline_test_data(self):
    """Pipeline the test data and save the jump output."""
    if self.linearity:
        Detector1Pipeline.call(self.ramp_file,
                               save_results=True,
                               output_dir=self.output_dir,
                               output_use_model=True,
                               steps={'ipc': {'skip': True},
                                      'rscd': {'skip': True},
                                      'lastframe': {'save_results': True,
                                                    'output_dir': self.output_dir},
                                      'dark_current': {'save_results': True,
                                                       'output_dir': self.output_dir},
                                      'jump': {'save_results': True,
                                               'output_dir': self.output_dir}})
    else:
        Detector1Pipeline.call(self.ramp_file,
                               save_results=True,
                               output_dir=self.output_dir,
                               output_use_model=True,
                               steps={'ipc': {'skip': True},
                                      'rscd': {'skip': True},
                                      'lastframe': {'save_results': True,
                                                    'output_dir': self.output_dir},
                                      'dark_current': {'save_results': True,
                                                       'output_dir': self.output_dir},
                                      'linearity': {'skip': True},
                                      'jump': {'save_results': True,
                                               'output_dir': self.output_dir}})

    self.pre_dark_file = os.path.join(self.output_dir, 'step_lastframe.fits')
    self.post_dark_file = os.path.join(self.output_dir, 'step_dark_current.fits')
    self.jump_file = os.path.join(self.output_dir, 'step_jump.fits')
    self.rate_file = os.path.join(self.output_dir, 'step_rate.fits')
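# The two Detector1Pipeline.call branches above differ only in whether the
# linearity step is skipped; a minimal sketch of the same logic with a single
# call site (assuming the same attributes as the method above):
steps = {'ipc': {'skip': True},
         'rscd': {'skip': True},
         'lastframe': {'save_results': True, 'output_dir': self.output_dir},
         'dark_current': {'save_results': True, 'output_dir': self.output_dir},
         'jump': {'save_results': True, 'output_dir': self.output_dir}}
if not self.linearity:
    steps['linearity'] = {'skip': True}
Detector1Pipeline.call(self.ramp_file, save_results=True,
                       output_dir=self.output_dir, output_use_model=True,
                       steps=steps)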
def test_pipeline_step_list():
    """Basic test that the number of steps in CALWEBB_DETECTOR1 is what we expect"""
    pipeline = Detector1Pipeline()
    assert len(pipeline.step_defs) == 15
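# If this count drifts after a jwst upgrade, the registered steps can be
# inspected directly: step_defs maps step names to their step classes.
print(sorted(Detector1Pipeline().step_defs))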
import os
import sys

import natsort

import jwst
from jwst.pipeline import Detector1Pipeline

# the script expects the input directory as its single argument
if len(sys.argv) < 2:
    print('You must specify the input directory')
    print('e.g. ./runpipelinelevel1.py indir')
    sys.exit()

rawdir = sys.argv[1]
print('pipeline:', jwst.__version__)

# define config dir
configdir = './config/'

# set up subdirectory to hold results of processing
# (rawdir is assumed to end with a path separator)
outdir = rawdir + 'pipeout/'
if not os.path.exists(outdir):
    os.makedirs(outdir)

# find files to be processed
dirlist = natsort.natsorted(os.listdir(rawdir))
dirlist[:] = (value for value in dirlist
              if value.startswith('jw') and value.endswith('.fits'))
numimages = len(dirlist)
print(numimages)

# iterate over files and run the pipeline
for j in range(numimages):
    uncal_image = rawdir + dirlist[j]
    config = configdir + 'calwebb_detector1.cfg'
    result = Detector1Pipeline.call(uncal_image, config_file=config,
                                    output_dir=outdir, save_results=True)
# ensure the configuration files are available
if not os.path.exists('cfg_files/'):
    os.mkdir('cfg_files/')
cfgs = collect_pipeline_cfgs.collect_pipeline_cfgs(dst='cfg_files/')

# run the detector1 pipeline on each exposure
det1s = []
dimods = []
im2mods = []
im3mods = []
for mm, od, sarr in zip(mods, odirs, subarr):
    det1 = Detector1Pipeline.call(mm, config_file='cfg_files/calwebb_tso1.cfg',
                                  save_results=True, output_dir=od,
                                  steps={"jump": {"rejection_threshold": 10.}})
    det1s.append(det1)

    # identify the filename of the rateints file, which is the product we
    # want to continue with; load it into a model and add it to the list
    di = det1.meta.filename.split('.')[0] + 'ints.fits'
    dimod = datamodels.open(od + di)
    dimods.append(dimod)

    # run the Image2Pipeline
    im2 = Image2Pipeline.call(dimod,
                              config_file='cfg_files/calwebb_tso-image2.cfg',
                              save_results=True, output_dir=od)
    im2mods.append(im2[0])
def pipeline_mirisim(input_dir):
    """Run MIRISim simulation outputs through the JWST pipeline and save diagnostic plots."""
    # set up logging
    log_file = os.path.join(os.path.abspath(input_dir), 'pipeline_MIRISim.log')

    # remove any existing pipeline log file
    if os.path.isfile(log_file):
        os.remove(log_file)

    testing_logger = logging.getLogger(__name__)
    testing_logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(log_file)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    testing_logger.addHandler(handler)

    # check that the simulation folder exists
    if not os.path.isdir(input_dir):
        print("Simulation folder not found")
        sys.exit(1)

    # collect the individual simulations to run through the pipeline
    simulations = glob.glob(os.path.join(input_dir, 'IMA*', '20*'))
    simulations.extend(glob.glob(os.path.join(input_dir, 'MRS*', '20*')))
    simulations.extend(glob.glob(os.path.join(input_dir, 'LRS*', '20*')))

    # get the full path of the cwd
    cwd = os.path.abspath(os.getcwd())

    # set the output figure directory, recreating it if it already exists
    out_fig_dir = os.path.join(cwd, input_dir, 'pipeline_plots')
    shutil.rmtree(out_fig_dir, ignore_errors=True)
    os.mkdir(out_fig_dir)

    # cycle through the simulations
    for simulation in simulations:
        os.chdir(os.path.join(simulation, 'det_images'))
        level1b_files = glob.glob('*.fits')

        # isolate the simulation name for logging
        sim_name = simulation.split(os.sep)[1]

        # check which MIRI mode
        with datamodels.open(level1b_files[0]) as level1b_dm:
            miri_mode = level1b_dm.meta.exposure.type

        # IMAGER --------------------------------------------
        if miri_mode == 'MIR_IMAGE':
            # run level 1 and 2 imager pipelines
            for f in level1b_files:
                with datamodels.open(f) as level1b_dm:
                    try:
                        level2a_dm = Detector1Pipeline.call(
                            level1b_dm, output_use_model=True, save_results=True,
                            steps={'ipc': {'skip': True}})
                        level2a_dm.meta.wcsinfo.wcsaxes = 2
                        Image2Pipeline.call(level2a_dm, save_results=True,
                                            output_use_model=True)
                        # log pass
                        testing_logger.info('%s levels 1 and 2 passed' % sim_name)
                        levels12_check = True
                    except Exception as e:
                        testing_logger.warning('%s failed' % sim_name)
                        testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                        levels12_check = False

            # run level 3 pipeline
            if len(level1b_files) > 1:
                try:
                    level2B_files = glob.glob('*_cal.fits')
                    call(["asn_from_list", "-o", "IMA_asn.json"] + level2B_files
                         + ["--product-name", "dither"])
                    dm_3_container = datamodels.ModelContainer("IMA_asn.json")
                    Image3Pipeline.call(dm_3_container, save_results=True,
                                        steps={'tweakreg': {'skip': True}})
                    # log pass
                    testing_logger.info('%s level 3 passed' % sim_name)
                    level3_check = True
                except Exception as e:
                    testing_logger.warning('%s failed' % sim_name)
                    testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                    level3_check = False

            if len(level1b_files) == 1 and levels12_check:
                level2A_file = glob.glob('*_rate.fits')[0]
                level2B_file = glob.glob('*_cal.fits')[0]

                # set up output plots
                fig, axs = plt.subplots(1, 3)
                fig.set_figwidth(15.0)
                fig.set_figheight(5.0)
                axs = axs.ravel()

                # plot levels 1B, 2A and 2B
                with datamodels.open(level1b_files[0]) as level1b_dm, \
                        datamodels.open(level2A_file) as level2a_dm, \
                        datamodels.open(level2B_file) as level2b_dm:
                    axs[0].imshow(level1b_dm.data[0][-1], cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[0].annotate('level 1B', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')
                    axs[1].imshow(level2a_dm.data, cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[1].annotate('level 2A', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')
                    axs[2].imshow(level2b_dm.data, cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[2].annotate('level 2B', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')

                # save the pipeline plot
                out_fig_name = sim_name + '.pdf'
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

            elif len(level1b_files) > 1 and level3_check:
                driz_dm = datamodels.open('dither_i2d.fits')

                # set up output plots
                fig, axs = plt.subplots(1, 1)
                fig.set_figwidth(8.0)
                fig.set_figheight(8.0)

                # plot the drizzled image
                axs.imshow(driz_dm.data, cmap='jet', interpolation='nearest',
                           origin='lower', norm=LogNorm(vmin=1, vmax=1000))
                axs.annotate('Drizzled image', xy=(0.0, 1.02),
                             xycoords='axes fraction', fontsize=12,
                             fontweight='bold', color='k')
                axs.set_facecolor('black')

                # save the pipeline plot
                out_fig_name = sim_name + '.pdf'
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

        # MRS --------------------------------------------
        elif miri_mode == 'MIR_MRS':
            # run level 1 and 2 pipelines
            for f in level1b_files:
                with datamodels.open(f) as level1b_dm:
                    try:
                        level2a_dm = Detector1Pipeline.call(
                            level1b_dm, output_use_model=True, save_results=True,
                            steps={'ipc': {'skip': True}})
                        Spec2Pipeline.call(level2a_dm, save_results=True,
                                           steps={'straylight': {'skip': True},
                                                  'extract_1d': {'save_results': True}})
                        # log pass
                        testing_logger.info('%s levels 1 and 2 passed' % sim_name)
                        levels12_check = True
                    except Exception as e:
                        testing_logger.warning('%s failed' % sim_name)
                        testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                        levels12_check = False

            # run level 3 pipeline
            if len(level1b_files) > 1:
                try:
                    level2B_files = glob.glob('*_cal.fits')
                    call(["asn_from_list", "-o", "MRS_asn.json"] + level2B_files
                         + ["--product-name", "dither"])
                    dm_3_container = datamodels.ModelContainer("MRS_asn.json")
                    Spec3Pipeline.call(dm_3_container, save_results=True,
                                       steps={'outlier_detection': {'skip': True},
                                              'cube_build': {'save_results': True},
                                              'extract_1d': {'save_results': True}})
                    # log pass
                    testing_logger.info('%s level 3 passed' % sim_name)
                    level3_check = True
                except Exception as e:
                    testing_logger.warning('%s failed' % sim_name)
                    testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                    level3_check = False

            if len(level1b_files) == 1 and levels12_check:
                level2A_file = glob.glob('*_rate.fits')[0]
                level2B_file = glob.glob('*_cal.fits')[0]
                spec_file = glob.glob('*x1d.fits')[0]

                # set up output plots
                fig, axs = plt.subplots(2, 2)
                fig.set_figwidth(15.0)
                fig.set_figheight(15.0)
                axs = axs.ravel()

                with datamodels.open(level1b_files[0]) as level1b_dm, \
                        datamodels.open(level2A_file) as level2a_dm, \
                        datamodels.open(level2B_file) as level2b_dm, \
                        datamodels.open(spec_file) as spec_dm:
                    axs[0].imshow(level1b_dm.data[0][-1], cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[0].annotate('level 1B', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')
                    axs[1].imshow(level2a_dm.data, cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[1].annotate('level 2A', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')
                    axs[2].imshow(level2b_dm.data, cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[2].annotate('level 2B', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')

                    # plot the spectrum
                    axs[3].plot(spec_dm.spec[0].spec_table['WAVELENGTH'],
                                spec_dm.spec[0].spec_table['FLUX'],
                                c='b', marker='.', markersize=3,
                                linestyle='-', linewidth=2)
                    axs[3].set_ylabel(r'Flux ($\mu$Jy)')
                    axs[3].set_xlabel(r'Wavelength ($\mu$m)')
                    axs[3].set_xlim(4.0, 28.0)

                # save the pipeline plot
                out_fig_name = sim_name + '.pdf'
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

            elif len(level1b_files) > 1 and level3_check:
                # cube
                cube_file = glob.glob('dither*s3d.fits')[0]
                cube_dm = datamodels.open(cube_file)

                # spectrum
                spec_file = glob.glob('dither*1d.fits')[0]
                dm = datamodels.open(spec_file)

                fig, axs = plt.subplots(1, 2, figsize=(12, 8))
                axs[0].imshow(np.sum(cube_dm.data, axis=0), cmap='jet',
                              interpolation='nearest', origin='lower',
                              norm=LogNorm(vmin=100, vmax=5e5))
                axs[0].annotate('Collapsed cube', xy=(0.0, 1.02),
                                xycoords='axes fraction', fontsize=12,
                                fontweight='bold', color='k')
                axs[0].set_facecolor('black')

                # plot the spectrum
                axs[1].plot(dm.spec[0].spec_table['WAVELENGTH'],
                            dm.spec[0].spec_table['FLUX'],
                            c='b', marker='.', markersize=3,
                            linestyle='-', linewidth=2)
                axs[1].set_ylabel(r'Flux ($\mu$Jy)')
                axs[1].set_xlabel(r'Wavelength ($\mu$m)')
                axs[1].set_xlim(4.0, 28.0)
                axs[1].annotate('Spectrum', xy=(0.0, 1.02),
                                xycoords='axes fraction', fontsize=12,
                                fontweight='bold', color='k')

                # save the pipeline plot
                out_fig_name = sim_name + '.pdf'
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

        # LRS-FIXEDSLIT --------------------------------------------
        elif miri_mode == 'MIR_LRS-FIXEDSLIT':
            # run level 1 and 2 pipelines
            for f in level1b_files:
                with datamodels.open(f) as level1b_dm:
                    try:
                        level2a_dm = Detector1Pipeline.call(
                            level1b_dm, output_use_model=True, save_results=True,
                            steps={'ipc': {'skip': True}})
                        Spec2Pipeline.call(level2a_dm, save_results=True,
                                           steps={'extract_1d': {'save_results': True}})
                        # log pass
                        testing_logger.info('%s levels 1 and 2 passed' % sim_name)
                        levels12_check = True
                    except Exception as e:
                        testing_logger.warning('%s failed' % sim_name)
                        testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                        levels12_check = False

            if len(level1b_files) == 1 and levels12_check:
                level2A_file = glob.glob('*_rate.fits')[0]
                level2B_file = glob.glob('*_cal.fits')[0]
                spec_file = glob.glob('*x1d.fits')[0]

                # set up output plots
                fig, axs = plt.subplots(2, 2)
                fig.set_figwidth(15.0)
                fig.set_figheight(15.0)
                axs = axs.ravel()

                with datamodels.open(level1b_files[0]) as level1b_dm, \
                        datamodels.open(level2A_file) as level2a_dm, \
                        datamodels.open(level2B_file) as level2b_dm, \
                        datamodels.open(spec_file) as spec_dm:
                    axs[0].imshow(level1b_dm.data[0][-1], cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[0].annotate('level 1B', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')
                    axs[1].imshow(level2a_dm.data, cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[1].annotate('level 2A', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')
                    axs[2].imshow(level2b_dm.data, cmap='jet',
                                  interpolation='nearest', norm=LogNorm(),
                                  origin='lower')
                    axs[2].annotate('level 2B', xy=(0.7, 0.95),
                                    xycoords='axes fraction', fontsize=10,
                                    fontweight='bold', color='w')

                    # plot the spectrum (trimming the first and last points)
                    axs[3].plot(spec_dm.spec[0].spec_table['WAVELENGTH'][1:-1],
                                spec_dm.spec[0].spec_table['FLUX'][1:-1],
                                c='b', marker='.', markersize=3,
                                linestyle='-', linewidth=2)
                    axs[3].set_ylabel(r'Flux ($\mu$Jy)')
                    axs[3].set_xlabel(r'Wavelength ($\mu$m)')
                    axs[3].set_xlim(3.0, 15.0)
                    axs[3].annotate('Spectrum', xy=(0.0, 1.02),
                                    xycoords='axes fraction', fontsize=12,
                                    fontweight='bold', color='k')
                    axs[3].set_facecolor('white')

                # save the pipeline plot
                out_fig_name = sim_name + '.pdf'
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

        # LRS-SLITLESS --------------------------------------------
        elif miri_mode == 'MIR_LRS-SLITLESS':
            level1b_dm = datamodels.open(level1b_files[0])

            try:
                # fix the 'SLITLESSPRISM'/'SUBPRISM' subarray name conflict
                level1b_dm.meta.subarray.name = 'SUBPRISM'
                Detector1Pipeline.call(level1b_dm, output_use_model=True,
                                       save_results=True,
                                       steps={'ipc': {'skip': True},
                                              'lastframe': {'skip': True}})
                level2a_ints = glob.glob('*rateints.fits')[0]
                Spec2Pipeline.call(level2a_ints, save_results=True,
                                   steps={'extract_1d': {'save_results': True}})
                level2b_ints = glob.glob('*calints.fits')[0]
                # log pass
                testing_logger.info('%s passed' % sim_name)
                levels12_check = True
            except Exception as e:
                testing_logger.warning('%s failed' % sim_name)
                testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                levels12_check = False

            try:
                level2B_files = glob.glob('*_calints.fits')
                call(["asn_from_list", "-o", "LRS-SLITLESS_asn.json"] + level2B_files
                     + ["--product-name", "exposures"])
                Tso3Pipeline.call('LRS-SLITLESS_asn.json')
                # log pass
                testing_logger.info('%s level 3 passed' % sim_name)
                level3_check = True
            except Exception as e:
                testing_logger.warning('%s failed' % sim_name)
                testing_logger.warning(' %s: %s' % (e.__class__.__name__, str(e)))
                level3_check = False

            if len(level1b_files) == 1 and levels12_check:
                spec_file = glob.glob('*x1dints.fits')[0]

                # set up output plots
                fig, axs = plt.subplots(3, 3, sharey=True, sharex=True)
                fig.set_figwidth(15.0)
                fig.set_figheight(15.0)
                axs = axs.ravel()

                with datamodels.open(spec_file) as spec_dm:
                    for n in range(9):
                        # plot the spectrum of each integration
                        axs[n].plot(spec_dm.spec[n].spec_table['WAVELENGTH'][1:-1],
                                    spec_dm.spec[n].spec_table['FLUX'][1:-1],
                                    c='b', marker='.', markersize=0,
                                    linestyle='-', linewidth=2)
                        if n in [0, 3, 6]:
                            axs[n].set_ylabel(r'Flux ($\mu$Jy)')
                        if n in [6, 7, 8]:
                            axs[n].set_xlabel(r'Wavelength ($\mu$m)')

                # save the pipeline plot
                out_fig_name = sim_name + '.pdf'
                plt.tight_layout(pad=0.0)
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

            if level3_check:
                my_lightcurve = glob.glob('*.ecsv')[0]
                lightcurve_data = Table.read(my_lightcurve, format='ascii.ecsv')

                fig, axs = plt.subplots(1, 1, figsize=(10, 8))

                # plot the white light curve
                axs.plot(lightcurve_data[0][:], lightcurve_data[1][:], c='b',
                         marker='o', markersize=3, linestyle='-', linewidth=2,
                         label='white light curve')
                axs.set_title('White light curve', fontsize=15)
                axs.set_ylabel('white light flux', fontsize=15)
                axs.set_xlabel('MJD', fontsize=15)

                # save the pipeline plot
                out_fig_name = sim_name + '_whitelight.pdf'
                plt.tight_layout(pad=0.0)
                fig.savefig(os.path.join(out_fig_dir, out_fig_name), dpi=200)
                del fig

        # return to the original working directory for the next simulation
        os.chdir(cwd)
import io
import subprocess

import numpy as np
import yaml

from mirage import imaging_simulator
from jwst.pipeline import Detector1Pipeline

# NOTE: `file` (the input yaml) and `pathdir` (the data directory) are
# defined earlier in the originating script.

# modify the yaml input file, then run the Mirage imaging simulator on it
with open(file, 'r') as infile:
    yaml_content = yaml.safe_load(infile)
yaml_content['Reffiles']['astrometric'] = 'None'
yaml_content['psf_wing_threshold_file'] = 'config'
modified_file = file.replace('.yaml', '_mod.yaml')
with io.open(modified_file, 'w') as outfile:
    yaml.dump(yaml_content, outfile, default_flow_style=False)

t1 = imaging_simulator.ImgSim()
t1.paramfile = str(modified_file)
t1.create()

# Call the first stage DMS
if True:
    # list all uncal.fits files
    thecall = 'ls -1 ' + pathdir + 'jw*nis_uncal.fits'
    ls = subprocess.getoutput(thecall)
    uncal_list = np.array(ls.split('\n'))

    # Launch DMS stage 1 and write results in the same path as the uncal
    # images, with the default names.
    for i, uncal_filename in enumerate(uncal_list):
        result = Detector1Pipeline.call(uncal_filename, output_dir=pathdir,
                                        save_results=True)