def set_up_hicat(apply_continuous_dm_maps=False):
    """ Return a configured instance of the HiCAT simulator.

    Sets the pupil mask, whether the IrisAO is in or out, apodizer, Lyot stop and detector. Optionally, loads DM maps
    onto the two continuous face-sheet Boston DMs.

    :param apply_continuous_dm_maps: bool, whether to load BostonDM maps from path specified in configfile, default False
    :return: instance of HICAT_Sim()
    """
    hicat_sim = hicat.simulators.hicat_sim.HICAT_Sim()

    hicat_sim.pupil_mask = CONFIG_PASTIS.get('HiCAT', 'pupil_mask')    # I will likely have to implement a new pupil mask
    hicat_sim.iris_ao = CONFIG_PASTIS.get('HiCAT', 'iris_ao')
    hicat_sim.apodizer = CONFIG_PASTIS.get('HiCAT', 'apodizer')
    hicat_sim.lyot_stop = CONFIG_PASTIS.get('HiCAT', 'lyot_stop')
    hicat_sim.detector = 'imager'

    log.info(hicat_sim.describe())

    # Load Boston DM maps into HiCAT simulator
    if apply_continuous_dm_maps:
        path_to_dh_solution = CONFIG_PASTIS.get('HiCAT', 'dm_maps_path')
        dm1_surface, dm2_surface = read_continuous_dm_maps_hicat(path_to_dh_solution)
        hicat_sim.dm1.set_surface(dm1_surface)
        hicat_sim.dm2.set_surface(dm2_surface)
        log.info(f'BostonDM maps applied from {path_to_dh_solution}.')

    return hicat_sim
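# A minimal usage sketch of set_up_hicat(), mirroring how the analysis code elsewhere in this repository
# computes a normalized coronagraphic image. The include_fpm flag and the HDU-list return format of
# calc_psf() are assumptions taken from that code; this is illustrative, not the canonical HiCAT workflow.
def _example_hicat_images_sketch():
    """Illustrative sketch: direct PSF normalization and a normalized coro image with the HiCAT simulator."""
    hicat_sim = set_up_hicat(apply_continuous_dm_maps=True)

    # Direct (non-coronagraphic) image for the normalization factor
    hicat_sim.include_fpm = False
    direct = hicat_sim.calc_psf()
    norm = direct[0].data.max()

    # Coronagraphic image, normalized to the direct PSF peak
    hicat_sim.include_fpm = True
    coro_image = hicat_sim.calc_psf()
    return coro_image[0].data / norm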
def copy_config(outdir):
    """ Copy the config_local, or if non-existent, config_pastis.ini to outdir

    :param outdir: string, target location of copied configfile
    :return:
    """
    print('Saving the configfile to outputs folder.')
    try:
        copy(os.path.join(CONFIG_PASTIS.get('local', 'local_repo_path'), 'pastis', 'config_local.ini'), outdir)
    except IOError:
        copy(os.path.join(CONFIG_PASTIS.get('local', 'local_repo_path'), 'pastis', 'config_pastis.ini'), outdir)
def _jwst_matrix_one_pair(norm, wfe_aber, resDir, savepsfs, saveopds, segment_pair): """ Function to calculate JWST mean contrast of one aberrated segment pair in NIRCam; for num_matrix_luvoir_multiprocess(). :param norm: float, direct PSF normalization factor (peak pixel of direct PSF) :param wfe_aber: calibration aberration per segment in m :param resDir: str, directory for matrix calculations :param savepsfs: bool, if True, all PSFs will be saved to disk individually, as fits files :param saveopds: bool, if True, all pupil surface maps of aberrated segment pairs will be saved to disk as PDF :param segment_pair: tuple, pair of segments to aberrate, 0-indexed. If same segment gets passed in both tuple entries, the segment will be aberrated only once. Note how JWST segments start numbering at 0 just because that's python indexing, with 0 being the segment A1. :return: contrast as float, and segment pair as tuple """ # Set up JWST simulator in coronagraphic state jwst_instrument, jwst_ote = webbpsf_imaging.set_up_nircam() jwst_instrument.image_mask = CONFIG_PASTIS.get('JWST', 'focal_plane_mask') # Put aberration on correct segments. If i=j, apply only once! log.info(f'PAIR: {segment_pair[0]}-{segment_pair[1]}') # Identify the correct JWST segments seg_i = webbpsf_imaging.WSS_SEGS[segment_pair[0]].split('-')[0] seg_j = webbpsf_imaging.WSS_SEGS[segment_pair[1]].split('-')[0] # Put aberration on correct segments. If i=j, apply only once! jwst_ote.zero() jwst_ote.move_seg_local(seg_i, piston=wfe_aber, trans_unit='m') if segment_pair[0] != segment_pair[1]: jwst_ote.move_seg_local(seg_j, piston=wfe_aber, trans_unit='m') log.info('Calculating coro image...') image = jwst_instrument.calc_psf(nlambda=1) psf = image[0].data / norm # Save PSF image to disk if savepsfs: filename_psf = f'psf_piston_Noll1_segs_{segment_pair[0]}-{segment_pair[1]}' hcipy.write_fits(psf, os.path.join(resDir, 'psfs', filename_psf + '.fits')) # Plot segmented mirror WFE and save to disk if saveopds: opd_name = f'opd_piston_Noll1_segs_{segment_pair[0]}-{segment_pair[1]}' plt.clf() plt.figure(figsize=(8, 8)) ax2 = plt.subplot(111) jwst_ote.display_opd(ax=ax2, vmax=500, colorbar_orientation='horizontal', title='Aberrated segment pair') plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf')) log.info('Calculating mean contrast in dark hole') iwa = CONFIG_PASTIS.getfloat('JWST', 'IWA') owa = CONFIG_PASTIS.getfloat('JWST', 'OWA') sampling = CONFIG_PASTIS.getfloat('JWST', 'sampling') dh_mask = util.create_dark_hole(psf, iwa, owa, sampling) contrast = util.dh_mean(psf, dh_mask) return contrast, segment_pair
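# The per-pair function above is meant to be mapped over all segment pairs by a multiprocessing driver
# (num_matrix_luvoir_multiprocess() or its JWST analogue). A rough, hypothetical sketch of that dispatch,
# with norm, wfe_aber and resDir assumed to be provided by the calling script; the repository's actual
# driver may pin its arguments differently.
def _run_all_jwst_pairs_sketch(norm, wfe_aber, resDir, nb_seg, num_cpu=4):
    """Illustrative sketch: dispatch _jwst_matrix_one_pair() over all 0-indexed segment pairs."""
    import functools
    import multiprocessing

    # Upper-triangular list of segment pairs, including i == j for the diagonal terms
    segment_pairs = [(i, j) for i in range(nb_seg) for j in range(i, nb_seg)]

    # Pin everything except the pair itself (savepsfs=True, saveopds=True)
    one_pair = functools.partial(_jwst_matrix_one_pair, norm, wfe_aber, resDir, True, True)

    with multiprocessing.Pool(processes=num_cpu) as pool:
        results = pool.map(one_pair, segment_pairs)

    # Each entry is (contrast, (i, j))
    return results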
def set_up_nircam():
    """ Return a configured instance of the NIRCam simulator on JWST.

    Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE and zeros
    the OTE.

    :return: Tuple of NIRCam instance, and its OTE
    """
    nircam = webbpsf.NIRCam()
    nircam.include_si_wfe = False
    nircam.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
    nircam.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')

    nircam, ote = webbpsf.enable_adjustable_ote(nircam)
    ote.zero(zero_original=True)    # https://github.com/spacetelescope/webbpsf/blob/96537c459996f682ac6e9af808809ca13fb85e87/webbpsf/opds.py#L1125

    return nircam, ote
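# Sketch of how the direct PSF normalization factor ('norm' in the pair-wise matrix functions) is obtained
# from this simulator: remove the focal-plane mask, take the peak of the direct image, then restore the
# coronagraphic state. This mirrors the JWST branch of the analysis code in this repository.
def _nircam_direct_norm_sketch():
    """Illustrative sketch: compute the direct NIRCam PSF peak used for contrast normalization."""
    jwst_instrument, jwst_ote = set_up_nircam()

    jwst_instrument.image_mask = None              # direct (non-coronagraphic) image
    direct = jwst_instrument.calc_psf(nlambda=1)
    norm = direct[0].data.max()                    # peak pixel of the direct PSF

    # Restore the coronagraphic state for subsequent calculations
    jwst_instrument.image_mask = CONFIG_PASTIS.get('JWST', 'focal_plane_mask')
    return norm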
def _luvoir_matrix_one_pair(design, norm, wfe_aber, zern_mode, resDir, savepsfs, saveopds, segment_pair): """ Function to calculate LVUOIR-A mean contrast of one aberrated segment pair; for num_matrix_luvoir_multiprocess(). :param design: str, what coronagraph design to use - 'small', 'medium' or 'large' :param norm: float, direct PSF normalization factor (peak pixel of direct PSF) :param wfe_aber: float, calibration aberration per segment in m :param zern_mode: Zernike mode object, local Zernike aberration :param resDir: str, directory for matrix calculations :param savepsfs: bool, if True, all PSFs will be saved to disk individually, as fits files :param saveopds: bool, if True, all pupil surface maps of aberrated segment pairs will be saved to disk as PDF :param segment_pair: tuple, pair of segments to aberrate, 0-indexed. If same segment gets passed in both tuple entries, the segment will be aberrated only once. Note how LUVOIR segments start numbering at 1, with 0 being the center segment that doesn't exist. :return: contrast as float, and segment pair as tuple """ # Instantiate LUVOIR object sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling') optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path') luv = LuvoirAPLC(optics_input, design, sampling) log.info(f'PAIR: {segment_pair[0]+1}-{segment_pair[1]+1}') # Put aberration on correct segments. If i=j, apply only once! luv.flatten() luv.set_segment(segment_pair[0]+1, wfe_aber / 2, 0, 0) if segment_pair[0] != segment_pair[1]: luv.set_segment(segment_pair[1]+1, wfe_aber / 2, 0, 0) log.info('Calculating coro image...') image, inter = luv.calc_psf(ref=False, display_intermediate=False, return_intermediate='intensity') # Normalize PSF by reference image psf = image / norm # Save PSF image to disk if savepsfs: filename_psf = f'psf_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{segment_pair[0]+1}-{segment_pair[1]+1}' hcipy.write_fits(psf, os.path.join(resDir, 'psfs', filename_psf + '.fits')) # Plot segmented mirror WFE and save to disk if saveopds: opd_name = f'opd_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{segment_pair[0]+1}-{segment_pair[1]+1}' plt.clf() hcipy.imshow_field(inter['seg_mirror'], grid=luv.aperture.grid, mask=luv.aperture, cmap='RdBu') plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf')) log.info('Calculating mean contrast in dark hole') dh_intensity = psf * luv.dh_mask contrast = np.mean(dh_intensity[np.where(luv.dh_mask != 0)]) log.info(f'contrast: {float(contrast)}') # contrast is a Field, here casting to normal float return float(contrast), segment_pair
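# Downstream, the (contrast, pair) results from these per-pair functions are collected into the pair-wise
# contrast matrix, the coronagraph floor is subtracted, and the off-diagonal PASTIS matrix elements are
# reconstructed exactly as in the single-process builders further down in this module. A condensed sketch
# of that assembly step; 'results' is assumed to be the list returned by the multiprocessing driver.
def _assemble_pastis_matrix_sketch(results, nb_seg, contrast_floor, wfe_aber_m):
    """Illustrative sketch: build the PASTIS matrix from per-pair mean contrasts.

    results: list of (contrast, (i, j)) tuples with 0-indexed segment pairs
    wfe_aber_m: calibration aberration in meters
    """
    contrast_matrix = np.zeros((nb_seg, nb_seg))
    for contrast, (i, j) in results:
        contrast_matrix[i, j] = contrast_matrix[j, i] = contrast - contrast_floor

    matrix_pastis = np.copy(contrast_matrix)
    for i in range(nb_seg):
        for j in range(nb_seg):
            if i != j:
                # Off-diagonal elements from the pair-aberrated contrasts
                matrix_pastis[i, j] = (contrast_matrix[i, j] - contrast_matrix[i, i] - contrast_matrix[j, j]) / 2.

    # Normalize to the calibration aberration, expressed in nanometers as in num_matrix_luvoir()
    matrix_pastis /= np.square(wfe_aber_m * 1e9)
    return matrix_pastis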
def plot_single_mode(mode_nr, pastis_modes, out_dir, design, figsize=(8.5, 8.5), vmin=None, vmax=None, fname_suffix='', save=False): """ Plot a single PASTIS mode. :param mode_nr: int, mode index :param pastis_modes: array, PASTIS modes [seg, mode] in nm :param out_dir: str, output path to save the figure to if save=True :param design: str, "small", "medium", or "large" LUVOIR-A APLC design :param figsize: tuple, size of figure, default=(8.5,8.5) :param vmin: matplotlib min extent of image, default is None :param vmax: matplotlib max extent of image, default is None :param fname_suffix: str, optional, suffix to add to the saved file name :param save: bool, whether to save to disk or not, default is False :return: """ fname = f'mode_{mode_nr}' if fname_suffix != '': fname += f'_{fname_suffix}' # Create luvoir instance sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling') optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path') luvoir = LuvoirAPLC(optics_input, design, sampling) plt.figure(figsize=figsize, constrained_layout=False) one_mode = apply_mode_to_luvoir(pastis_modes[:, mode_nr - 1], luvoir)[0] hcipy.imshow_field(one_mode.phase, cmap='RdBu', vmin=vmin, vmax=vmax) plt.axis('off') plt.annotate(f'{mode_nr}', xy=(-7.1, -6.9), fontweight='roman', fontsize=43) cbar = plt.colorbar( fraction=0.046, pad=0.04 ) # no clue what these numbers mean but it did the job of adjusting the colorbar size to the actual plot size cbar.ax.tick_params( labelsize=40) # this changes the numbers on the colorbar plt.tight_layout() if save: plt.savefig(os.path.join(out_dir, '.'.join([fname, 'pdf'])))
def calculate_mode_phases(pastis_modes, design):
    """ Calculate the phase maps in radians of a set of PASTIS modes.

    :param pastis_modes: array, PASTIS modes [seg, mode] in nm
    :param design: str, "small", "medium", or "large" LUVOIR-A APLC design
    :return: all_modes, array of phase pupil images
    """
    # Create luvoir instance
    sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling')
    optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path')
    luvoir = LuvoirAPLC(optics_input, design, sampling)

    # Calculate phases of all modes
    all_modes = []
    for mode in range(len(pastis_modes)):
        all_modes.append(apply_mode_to_luvoir(pastis_modes[:, mode], luvoir)[0].phase)

    return all_modes
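# If the mode maps are needed as wavefront error in nanometers rather than radians, they can be rescaled
# with the working wavelength via OPD = phase * lambda / (2 pi). A small sketch, assuming the configfile
# 'lambda' entry is in nanometers, as it is read elsewhere in this repository.
def _mode_phases_to_wfe_nm_sketch(pastis_modes, design='small'):
    """Illustrative sketch: convert the phase maps returned above (radians) into WFE maps in nm."""
    wvln_nm = CONFIG_PASTIS.getfloat('LUVOIR', 'lambda')   # assumed to be in nm
    phase_maps = calculate_mode_phases(pastis_modes, design)
    return [phase * wvln_nm / (2 * np.pi) for phase in phase_maps]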
def single_mode_error_budget(design, run_choice, c_target=1e-10, single_mode=None):
    """ Calculate and plot single-mode error budget, for one PASTIS mode.

    Calculate the mode weight and consecutive contrast for a range of target contrasts and plot the recovered
    contrasts against the target contrasts.

    :param design: str, "small", "medium" or "large" LUVOIR-A APLC design
    :param run_choice: str, path to data
    :param c_target: float, target contrast
    :param single_mode: int, mode index for single mode error budget
    :return:
    """

    # Data directory
    workdir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), run_choice)

    # Info
    log.info(f'Working on {design} coronagraph design.')

    # Instantiate LUVOIR-A
    optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path')
    sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling')
    luvoir = LuvoirAPLC(optics_input, design, sampling)
    luvoir.flatten()

    # Generate baseline contrast
    psf_unaber, ref = luvoir.calc_psf(ref=True)
    norm = ref.max()
    dh_intensity = psf_unaber / norm * luvoir.dh_mask
    coronagraph_floor = np.mean(dh_intensity[np.where(dh_intensity != 0)])
    log.info(f'coronagraph_floor: {coronagraph_floor}')

    # Load PASTIS modes and eigenvalues
    pmodes, svals = modes_from_file(workdir)

    log.info('Single mode error budget')

    # Calculate the mode weight
    single_sigma = single_mode_sigma(c_target, coronagraph_floor, svals[single_mode - 1])
    log.info(f'Eigenvalue: {svals[single_mode - 1]}')
    log.info(f'single_sigma: {single_sigma}')

    single_contrast = single_mode_contrasts(single_sigma, pmodes, single_mode, luvoir)
    log.info(f'contrast: {single_contrast}')

    # Make array of target contrasts
    c_list = [5e-11, 8e-11, 1e-10, 5e-10, 1e-9, 5e-9, 1e-8]
    sigma_list = []

    # Calculate according sigmas
    for i, con in enumerate(c_list):
        sigma_list.append(single_mode_sigma(con, coronagraph_floor, svals[single_mode - 1]))

    # Calculate recovered contrasts
    c_recov = []
    for i, sig in enumerate(sigma_list):
        c_recov.append(single_mode_contrasts(sig, pmodes, single_mode, luvoir))

    log.info(f'c_recov: {c_recov}')
    np.savetxt(os.path.join(workdir, 'results', 'single_mode_target_contrasts.txt'), c_list)
    np.savetxt(os.path.join(workdir, 'results', f'single_mode_recovered_contrasts_mode{single_mode}.txt'), c_recov)

    plt.plot(c_list, c_recov)
    plt.title('Single-mode scaling')
    plt.semilogy()
    plt.semilogx()
    plt.xlabel('Target contrast $c_{target}$')
    plt.ylabel('Recovered contrast')
    plt.savefig(os.path.join(workdir, 'results', f'single_mode_scaled_mode{single_mode}.pdf'))


if __name__ == '__main__':

    coro_design = CONFIG_PASTIS.get('LUVOIR', 'coronagraph_design')
    run = CONFIG_PASTIS.get('numerical', 'current_analysis')
    c_stat = 1e-10

    single_mode_error_budget(coro_design, run, c_stat, single_mode=69)
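# For reference, a minimal sketch of what single_mode_sigma() is expected to compute, assuming the standard
# PASTIS relation between a single mode's tolerable weight, the target contrast, the coronagraph floor and
# the mode's eigenvalue. This is an assumption about the helper, not a copy of its implementation.
def _single_mode_sigma_sketch(c_target, c_floor, eigenvalue):
    """Illustrative sketch (assumed relation): weight of one PASTIS mode that alone raises the
    contrast from the coronagraph floor c_floor to the target contrast c_target."""
    return np.sqrt((c_target - c_floor) / eigenvalue)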
def make_aperture_nrp(): # Keep track of time start_time = time.time() # runtime currently is around 2 seconds for JWST, 9 minutes for ATLAST # Parameters telescope = CONFIG_PASTIS.get('telescope', 'name').upper() localDir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'active') outDir = os.path.join(localDir, 'segmentation') nb_seg = CONFIG_PASTIS.getint(telescope, 'nb_subapertures') # Number of apertures, without central obscuration flat_diam = CONFIG_PASTIS.getfloat(telescope, 'diameter') * u.m im_size_pupil = CONFIG_PASTIS.getint('numerical', 'tel_size_px') m_to_px = im_size_pupil/flat_diam # for conversion from meters to pixels: 3 [m] = 3 * m_to_px [px] log.info('Running aperture generation for {}\n'.format(telescope)) # If main subfolder "active" doesn't exist yet, create it. if not os.path.isdir(localDir): os.mkdir(localDir) # If subfolder "segmentation" doesn't exist yet, create it. if not os.path.isdir(outDir): os.mkdir(outDir) #-# Get the coordinates of the central pixel of each segment and save aperture to disk log.info('Getting segment centers') seg_position = np.zeros((nb_seg, 2)) if telescope == 'JWST': from e2e_simulators import webbpsf_imaging as webbim seg_position = webbim.get_jwst_coords(outDir) elif telescope == 'ATLAST': from e2e_simulators import atlast_imaging as atim _aper, seg_coords = atim.get_atlast_aperture(normalized=False, write_to_disk=True, outDir=outDir) seg_position[:,0] = seg_coords.x seg_position[:,1] = seg_coords.y # Save the segment center positions just in case we want to check them without running the code np.savetxt(os.path.join(outDir, 'seg_position.txt'), seg_position, fmt='%2.2f') # 18 segments, central segment (0) not included #-# Make distance list with distances between all of the segment centers among each other - in meters vec_list = np.zeros((nb_seg, nb_seg, 2)) for i in range(nb_seg): for j in range(nb_seg): vec_list[i,j,:] = seg_position[i,:] - seg_position[j,:] vec_list *= u.m # Save, but gotta save x and y coordinate separately because of the function I use for saving np.savetxt(os.path.join(outDir, 'vec_list_x.txt'), vec_list[:,:,0], fmt='%2.2f') # x distance; units: meters np.savetxt(os.path.join(outDir, 'vec_list_y.txt'), vec_list[:,:,1], fmt='%2.2f') # y distance; units: meters #-# Nulling redundant vectors = setting redundant vectors in vec_list equal to zero # This was really hard to figure out, so I simply went with exactly the same way like in IDL. # Reshape vec_list array to one dimension so that we can implement the loop below longshape = vec_list.shape[0] * vec_list.shape[1] vec_flat = np.reshape(vec_list, (longshape, 2)) # Save for testing np.savetxt(os.path.join(outDir, 'vec_flat.txt'), vec_flat) # Create array that will hold the nulled coordinates vec_null = np.copy(vec_flat) ap = 0 rp = 0 log.info('Nulling redundant segment pairs') for i in range(longshape): for j in range(i): # Since i starts at 0, the case with i=0 & j=0 never happens, we start at i=1 & j=0 # With this loop setup, in all cases we have i != k, which is the baseline between a # segment with itself - which is not a valid baseline, so these vectors are already set # to 0 in vec_null (they're already 0 in vec_flat). 
# Some print statements for testing #print('i, j', i, j) #print('vec_flat[i,:]: ', vec_flat[i,:]) #print('vec_flat[j,:]: ', vec_flat[j,:]) #print('norm diff: ', np.abs(np.linalg.norm(vec_flat[i,:]) - np.linalg.norm(vec_flat[j,:]))) #print('dir diff: ', np.linalg.norm(np.cross(vec_flat[i,:], vec_flat[j,:]))) ap += 1 # Check if length of two vectors is the same (within numerical limits) if np.abs(np.linalg.norm(vec_flat[i,:]) - np.linalg.norm(vec_flat[j,:])) <= 1.e-10: # Check if direction of two vectors is the same (within numerical limits) if np.linalg.norm(np.cross(vec_flat[i,:], vec_flat[j,:])) <= 1.e-10: # Some print statements for testing #print('i, j', i, j) #print('vec_flat[i,:]: ', vec_flat[i, :]) #print('vec_flat[j,:]: ', vec_flat[j, :]) #print('norm diff: ', np.abs(np.linalg.norm(vec_flat[i, :]) - np.linalg.norm(vec_flat[j, :]))) #print('dir diff: ', np.linalg.norm(np.cross(vec_flat[i, :], vec_flat[j, :]))) rp += 1 vec_null[i,:] = [0, 0] # Reshape nulled array back into proper shape of vec_list vec_list_nulled = np.reshape(vec_null, (vec_list.shape[0], vec_list.shape[1], 2)) # Save for testing np.savetxt(os.path.join(outDir, 'vec_list_nulled_x.txt'), vec_list_nulled[:, :, 0], fmt='%2.2f') np.savetxt(os.path.join(outDir, 'vec_list_nulled_y.txt'), vec_list_nulled[:, :, 1], fmt='%2.2f') #-# Extract the (number of) non redundant vectors: NR_distance_list # Create vector that holds distances between segments (instead of distance COORDINATES like in vec_list) distance_list = np.square(vec_list_nulled[:,:,0]) + np.square(vec_list_nulled[:,:,1]) # We use square distances so that we don't miss out on negative values nonzero = np.nonzero(distance_list) # get indices of non-redundant segment pairs NR_distance_list = distance_list[nonzero] # extract the list of distances between segments of NR pairs NR_pairs_nb = np.count_nonzero(distance_list) # Counting how many non-redundant (NR) pairs we have # Save for testing np.savetxt(os.path.join(outDir, 'NR_distance_list.txt'), NR_distance_list, fmt='%2.2f') log.info(f'Number of non-redundant pairs: {NR_pairs_nb}') #-# Select non redundant vectors # NR_pairs_list is a [NRP number, seg1, seg2] vector to hold non-redundant vector information. # NRPs are numbered from 1 to NR_pairs_nb, but Python indexing starts at 0! # Create the array of NRPs that will be the output NR_pairs_list = np.zeros((NR_pairs_nb, 2)) # NRP are numbered from 1 to NR_pairs_nb, as are the segments! # Loop over number of NRPs for i in range(NR_pairs_nb): # Since 'nonzero' holds the indices of segments, and Python indices start at 0, we have to add 1 to all the # 'segment names' in the array that tells us which NRP they form. NR_pairs_list[i,0] = nonzero[0][i] + 1 NR_pairs_list[i,1] = nonzero[1][i] + 1 # Again, NRP are numbered from 1 to NR_pairs_nb, and the segments are too! NR_pairs_list = NR_pairs_list.astype(int) # Save for testing np.savetxt(os.path.join(outDir, 'NR_pairs_list.txt'), NR_pairs_list, fmt='%i') #-# Generate projection matrix # Set diagonal to zero (distance between a segment and itself will always be zero) # Although I am pretty sure they already are. - yeah they are, vec_list is per definition a vector of distances # between all segments between each other, and the distance of a segment with itself is always zero. 
vec_list2 = np.copy(vec_list) for i in range(nb_seg): for j in range(nb_seg): if i ==j: vec_list2[i,j,:] = [0,0] # Save for testing np.savetxt(os.path.join(outDir, 'vec_list2_x.txt'), vec_list2[:, :, 0], fmt='%2.2f') np.savetxt(os.path.join(outDir, 'vec_list2_y.txt'), vec_list2[:, :, 1], fmt='%2.2f') # Initialize the projection matrix Projection_Matrix_int = np.zeros((nb_seg, nb_seg, 3)) # Reshape arrays so that we can loop over them easier vec2_long = vec_list2.shape[0] * vec_list2.shape[1] vec2_flat = np.reshape(vec_list2, (vec2_long, 2)) matrix_long = Projection_Matrix_int.shape[0] * Projection_Matrix_int.shape[1] matrix_flat = np.reshape(Projection_Matrix_int, (matrix_long, 3)) log.info('Creating projection matrix') for i in range(np.square(nb_seg)): # Compare segment pair in i against all available NRPs. # Where it matches, record the NRP number in the matrix entry that corresponds to segments in i. for k in range(NR_pairs_nb): # Since the segment names (numbers) in NR_pairs_list assume we start numbering the segments at 1, we have to # subtract 1 every time when we need to convert a segment number into an index. # This means we write NR_pairs_list[k,0]-1 and NR_pairs_list[k,1]-1 . # Figure out which NRP a segment distance vector corresponds to - first by length. if np.abs(np.linalg.norm(vec2_flat[i, :]) - np.linalg.norm(vec_list[NR_pairs_list[k,0]-1, NR_pairs_list[k,1]-1, :])) <= 1.e-10: # Figure out which NRP a segment distance vector corresponds to - now by direction. if np.linalg.norm(np.cross(vec2_flat[i, :], vec_list[NR_pairs_list[k,0]-1, NR_pairs_list[k,1]-1, :])) <= 1.e-10: matrix_flat[i, 0] = k + 1 # Again: NRP start their numbering at 1 matrix_flat[i, 1] = NR_pairs_list[k,1] + 1 # and segments start their numbering at 1 too matrix_flat[i, 2] = NR_pairs_list[k,0] + 1 # (see pupil image!). # Reshape matrix back to normal form Projection_Matrix = np.reshape(matrix_flat, (Projection_Matrix_int.shape[0], Projection_Matrix_int.shape[1], 3)) # Convert the segment positions in vec_list from meters to pixels vec_list_px = vec_list * m_to_px #-# Save the arrays: vec_list, NR_pairs_list, Projection_Matrix util.write_fits(vec_list_px.value, os.path.join(outDir, 'vec_list.fits'), header=None, metadata=None) util.write_fits(NR_pairs_list, os.path.join(outDir, 'NR_pairs_list_int.fits'), header=None, metadata=None) util.write_fits(Projection_Matrix, os.path.join(outDir, 'Projection_Matrix.fits'), header=None, metadata=None) log.info('All outputs saved to {}'.format(outDir)) # Tell us how long it took to finish. end_time = time.time() log.info(f'Runtime for aperture_definition.py: {end_time - start_time}sec = {(end_time - start_time)/60}min')
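# The nulling loop above treats two baseline vectors as redundant when they agree both in length and in
# direction to within numerical tolerance. The same test pulled out into a small helper for clarity; the
# script itself keeps the comparison inline, so this is only an illustrative restatement.
def _is_redundant_sketch(vec_a, vec_b, tol=1.e-10):
    """Illustrative sketch of the redundancy test used in the nulling loop."""
    same_length = np.abs(np.linalg.norm(vec_a) - np.linalg.norm(vec_b)) <= tol
    same_direction = np.linalg.norm(np.cross(vec_a, vec_b)) <= tol
    return same_length and same_direction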
import os
import astropy.units as u
import logging
import poppy

from pastis.config import CONFIG_PASTIS
import pastis.util as util

log = logging.getLogger()

try:
    import webbpsf

    # Setting to ensure that PyCharm finds the webbpsf-data folder. If you don't know where it is, find it with:
    # webbpsf.utils.get_webbpsf_data_path()
    # --> e.g.: >>source activate pastis   >>ipython   >>import webbpsf   >>webbpsf.utils.get_webbpsf_data_path()
    os.environ['WEBBPSF_PATH'] = CONFIG_PASTIS.get('local', 'webbpsf_data_path')
    WSS_SEGS = webbpsf.constants.SEGNAMES_WSS_ORDER

except ImportError:
    log.info('WebbPSF was not imported.')


NB_SEG = CONFIG_PASTIS.getint('JWST', 'nb_subapertures')
FLAT_TO_FLAT = CONFIG_PASTIS.getfloat('JWST', 'flat_to_flat')
WVLN = CONFIG_PASTIS.getfloat('JWST', 'lambda') * u.nm
IM_SIZE_PUPIL = CONFIG_PASTIS.getint('numerical', 'tel_size_px')
FLAT_DIAM = CONFIG_PASTIS.getfloat('JWST', 'flat_diameter') * u.m
IM_SIZE_E2E = CONFIG_PASTIS.getint('numerical', 'im_size_px_webbpsf')


def get_jwst_coords(outDir):
def ana_matrix_jwst(): # Keep track of time start_time = time.time() # runtime is currently around 11 minutes log.info('Building analytical matrix for JWST\n') # Parameters datadir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'active') which_tel = CONFIG_PASTIS.get('telescope', 'name') resDir = os.path.join(datadir, 'matrix_analytical') nb_seg = CONFIG_PASTIS.getint(which_tel, 'nb_subapertures') nm_aber = CONFIG_PASTIS.getfloat(which_tel, 'calibration_aberration') * u.nm zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike') # Noll convention! zern_mode = util.ZernikeMode( zern_number) # Create Zernike mode object for easier handling # If subfolder "matrix_analytical" doesn't exist yet, create it. if not os.path.isdir(resDir): os.mkdir(resDir) #-# Generating the PASTIS matrix matrix_direct = np.zeros( [nb_seg, nb_seg]) # Generate empty matrix for contrast values from loop. all_ims = [] all_dhs = [] all_contrasts = [] for i in range(nb_seg): for j in range(nb_seg): log.info('STEP: {}-{} / {}-{}'.format(i + 1, j + 1, nb_seg, nb_seg)) # Putting aberration only on segments i and j tempA = np.zeros([nb_seg]) tempA[i] = nm_aber.value tempA[j] = nm_aber.value tempA *= u.nm # making sure this array has the right units # Create PASTIS image and save full image as well as DH image temp_im_am, full_psf = impastis.analytical_model(zern_number, tempA, cali=True) filename_psf = 'psf_' + zern_mode.name + '_' + zern_mode.convention + str( zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1) util.write_fits(full_psf, os.path.join(resDir, 'psfs', filename_psf + '.fits'), header=None, metadata=None) all_ims.append(full_psf) filename_dh = 'dh_' + zern_mode.name + '_' + zern_mode.convention + str( zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1) util.write_fits(temp_im_am, os.path.join(resDir, 'darkholes', filename_dh + '.fits'), header=None, metadata=None) all_dhs.append(temp_im_am) contrast = np.mean(temp_im_am[np.where(temp_im_am != 0)]) matrix_direct[i, j] = contrast log.info(f'contrast = {contrast}') all_contrasts.append(contrast) all_ims = np.array(all_ims) all_dhs = np.array(all_dhs) all_contrasts = np.array(all_contrasts) # Filling the off-axis elements matrix_two_N = np.copy( matrix_direct ) # This is just an intermediary copy so that I don't mix things up. matrix_pastis = np.copy( matrix_direct) # This will be the final PASTIS matrix. for i in range(nb_seg): for j in range(nb_seg): if i != j: matrix_off_val = (matrix_two_N[i, j] - matrix_two_N[i, i] - matrix_two_N[j, j]) / 2. matrix_pastis[i, j] = matrix_off_val log.info('Off-axis for i{}-j{}: {}'.format( i + 1, j + 1, matrix_off_val)) # Normalize matrix for the input aberration matrix_pastis /= np.square(nm_aber.value) # Save matrix to file filename = 'PASTISmatrix_' + zern_mode.name + '_' + zern_mode.convention + str( zern_mode.index) util.write_fits(matrix_pastis, os.path.join(resDir, filename + '.fits'), header=None, metadata=None) log.info(f'Matrix saved to: {os.path.join(resDir, filename + ".fits")}') # Save the PSF and DH image *cubes* as well (as opposed to each one individually) util.write_fits(all_ims, os.path.join(resDir, 'psfs', 'psf_cube' + '.fits'), header=None, metadata=None) util.write_fits(all_dhs, os.path.join(resDir, 'darkholes', 'dh_cube' + '.fits'), header=None, metadata=None) np.savetxt(os.path.join(resDir, 'pair-wise_contrasts.txt'), all_contrasts, fmt='%e') # Tell us how long it took to finish. 
end_time = time.time() log.info( f'Runtime for matrix_building.py: {end_time - start_time}sec = {(end_time - start_time) / 60}min' ) log.info('Data saved to {}'.format(resDir))
wvln=CONFIG_PASTIS.getfloat(instrument, 'lambda'), out_dir=resultdir, fname_suffix=f'{no_realizations}_realizations_each', save=True) end_time = time.time() runtime = end_time - start_time log.info( f'\nTotal runtime for pastis_vs_e2e_contrast_calc.py: {runtime} sec = {runtime/60} min' ) if __name__ == '__main__': # Pick one to run #hockeystick_jwst() instrument = CONFIG_PASTIS.get('telescope', 'name') run_choice = CONFIG_PASTIS.get('numerical', 'current_analysis') coro_design = CONFIG_PASTIS.get('LUVOIR', 'coronagraph_design') result_dir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), run_choice, 'results') matrix_dir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), run_choice, 'matrix_numerical') hockeystick_curve(instrument, apodizer_choice=coro_design, matrixdir=matrix_dir, resultdir=result_dir, range_points=30, no_realizations=10)
def analytical_model(zernike_pol, coef, cali=False): """ :param zernike_pol: :param coef: :param cali: bool; True if we already have calibration coefficients to use. False if we still need to create them. :return: """ #-# Parameters dataDir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'active') telescope = CONFIG_PASTIS.get('telescope', 'name') nb_seg = CONFIG_PASTIS.getint(telescope, 'nb_subapertures') tel_size_m = CONFIG_PASTIS.getfloat(telescope, 'diameter') * u.m real_size_seg = CONFIG_PASTIS.getfloat( telescope, 'flat_to_flat' ) # in m, size in meters of an individual segment flatl to flat size_seg = CONFIG_PASTIS.getint( 'numerical', 'size_seg') # pixel size of an individual segment tip to tip wvln = CONFIG_PASTIS.getint(telescope, 'lambda') * u.nm inner_wa = CONFIG_PASTIS.getint(telescope, 'IWA') outer_wa = CONFIG_PASTIS.getint(telescope, 'OWA') tel_size_px = CONFIG_PASTIS.getint( 'numerical', 'tel_size_px') # pupil diameter of telescope in pixels im_size_pastis = CONFIG_PASTIS.getint( 'numerical', 'im_size_px_pastis') # image array size in px sampling = CONFIG_PASTIS.getfloat(telescope, 'sampling') # sampling size_px_tel = tel_size_m / tel_size_px # size of one pixel in pupil plane in m px_sq_to_rad = (size_px_tel * np.pi / tel_size_m) * u.rad zern_max = CONFIG_PASTIS.getint('zernikes', 'max_zern') sz = CONFIG_PASTIS.getint( 'ATLAST', 'im_size_lamD_hcipy') # image size in lam/D, only used in ATLAST case # Create Zernike mode object for easier handling zern_mode = util.ZernikeMode(zernike_pol) #-# Mean subtraction for piston if zernike_pol == 1: coef -= np.mean(coef) #-# Generic segment shapes if telescope == 'JWST': # Load pupil from file pupil = fits.getdata( os.path.join(dataDir, 'segmentation', 'pupil.fits')) # Put pupil in randomly picked, slightly larger image array pup_im = np.copy(pupil) # remove if lines below this are active #pup_im = np.zeros([tel_size_px, tel_size_px]) #lim = int((pup_im.shape[1] - pupil.shape[1])/2.) #pup_im[lim:-lim, lim:-lim] = pupil # test_seg = pupil[394:,197:315] # this is just so that I can display an individual segment when the pupil is 512 # test_seg = pupil[:203,392:631] # ... 
when the pupil is 1024 # one_seg = np.zeros_like(test_seg) # one_seg[:110, :] = test_seg[8:, :] # this is the centered version of the individual segment for 512 px pupil # Creat a mini-segment (one individual segment from the segmented aperture) mini_seg_real = poppy.NgonAperture( name='mini', radius=real_size_seg ) # creating real mini segment shape with poppy #test = mini_seg_real.sample(wavelength=wvln, grid_size=flat_diam, return_scale=True) # fix its sampling with wavelength mini_hdu = mini_seg_real.to_fits(wavelength=wvln, npix=size_seg) # make it a fits file mini_seg = mini_hdu[ 0].data # extract the image data from the fits file elif telescope == 'ATLAST': # Create mini-segment pupil_grid = hcipy.make_pupil_grid(dims=tel_size_px, diameter=real_size_seg) focal_grid = hcipy.make_focal_grid( pupil_grid, sampling, sz, wavelength=wvln.to( u.m).value) # fov = lambda/D radius of total image prop = hcipy.FraunhoferPropagator(pupil_grid, focal_grid) mini_seg_real = hcipy.hexagonal_aperture(circum_diameter=real_size_seg, angle=np.pi / 2) mini_seg_hc = hcipy.evaluate_supersampled( mini_seg_real, pupil_grid, 4 ) # the supersampling number doesn't really matter in context with the other numbers mini_seg = mini_seg_hc.shaped # make it a 2D array # Redefine size_seg if using HCIPy size_seg = mini_seg.shape[0] # Make stand-in pupil for DH array pupil = fits.getdata( os.path.join(dataDir, 'segmentation', 'pupil.fits')) pup_im = np.copy(pupil) #-# Generate a dark hole mask #TODO: simplify DH generation and usage dh_area = util.create_dark_hole( pup_im, inner_wa, outer_wa, sampling ) # this might become a problem if pupil size is not same like pastis image size. fine for now though. if telescope == 'ATLAST': dh_sz = util.zoom_cen(dh_area, sz * sampling) #-# Import information form segmentation script Projection_Matrix = fits.getdata( os.path.join(dataDir, 'segmentation', 'Projection_Matrix.fits')) vec_list = fits.getdata( os.path.join(dataDir, 'segmentation', 'vec_list.fits')) # in pixels NR_pairs_list = fits.getdata( os.path.join(dataDir, 'segmentation', 'NR_pairs_list_int.fits')) # Figure out how many NRPs we're dealing with NR_pairs_nb = NR_pairs_list.shape[0] #-# Chose whether calibration factors to do the calibraiton with if cali: filename = 'calibration_' + zern_mode.name + '_' + zern_mode.convention + str( zern_mode.index) ck = fits.getdata( os.path.join(dataDir, 'calibration', filename + '.fits')) else: ck = np.ones(nb_seg) coef = coef * ck #-# Generic coefficients # the coefficients in front of the non redundant pairs, the A_q in eq. 13 in Leboulleux et al. 2018 generic_coef = np.zeros( NR_pairs_nb ) * u.nm * u.nm # setting it up with the correct units this will have for q in range(NR_pairs_nb): for i in range(nb_seg): for j in range(i + 1, nb_seg): if Projection_Matrix[i, j, 0] == q + 1: generic_coef[q] += coef[i] * coef[j] #-# Constant sum and cosine sum - calculating eq. 13 from Leboulleux et al. 2018 if telescope == 'JWST': i_line = np.linspace(-im_size_pastis / 2., im_size_pastis / 2., im_size_pastis) tab_i, tab_j = np.meshgrid(i_line, i_line) cos_u_mat = np.zeros( (int(im_size_pastis), int(im_size_pastis), NR_pairs_nb)) elif telescope == 'ATLAST': i_line = np.linspace(-(2 * sz * sampling) / 2., (2 * sz * sampling) / 2., (2 * sz * sampling)) tab_i, tab_j = np.meshgrid(i_line, i_line) cos_u_mat = np.zeros((int((2 * sz * sampling)), int( (2 * sz * sampling)), NR_pairs_nb)) # Calculating the cosine terms from eq. 13. 
# The -1 with each NR_pairs_list is because the segment names are saved starting from 1, but Python starts # its indexing at zero, so we have to make it start at zero here too. for q in range(NR_pairs_nb): # cos(b_q <dot> u): b_q with 1 <= q <= NR_pairs_nb is the basis of NRPS, meaning the distance vectors between # two segments of one NRP. We can read these out from vec_list. # u is the position (vector) in the detector plane. Here, those are the grids tab_i and tab_j. # We need to calculate the dot product between all b_q and u, so in each iteration (for q), we simply add the # x and y component. cos_u_mat[:, :, q] = np.cos( px_sq_to_rad * (vec_list[NR_pairs_list[q, 0] - 1, NR_pairs_list[q, 1] - 1, 0] * tab_i) + px_sq_to_rad * (vec_list[NR_pairs_list[q, 0] - 1, NR_pairs_list[q, 1] - 1, 1] * tab_j)) * u.dimensionless_unscaled sum1 = np.sum( coef**2 ) # sum of all a_{k,l} in eq. 13 - this works only for single Zernikes (l fixed), because np.sum would sum over l too, which would be wrong. if telescope == 'JWST': sum2 = np.zeros( (int(im_size_pastis), int(im_size_pastis)) ) * u.nm * u.nm # setting it up with the correct units this will have elif telescope == 'ATLAST': sum2 = np.zeros( (int(2 * sz * sampling), int(2 * sz * sampling))) * u.nm * u.nm for q in range(NR_pairs_nb): sum2 = sum2 + generic_coef[q] * cos_u_mat[:, :, q] #-# Local Zernike if telescope == 'JWST': # Generate a basis of Zernikes with the mini segment being the support isolated_zerns = zern.hexike_basis(nterms=zern_max, npix=size_seg, rho=None, theta=None, vertical=False, outside=0.0) # Calculate the Zernike that is currently being used and put it on one single subaperture, the result is Zer # Apply the currently used Zernike to the mini-segment. if zernike_pol == 1: Zer = np.copy(mini_seg) elif zernike_pol in range(2, zern_max - 2): Zer = np.copy(mini_seg) Zer = Zer * isolated_zerns[zernike_pol - 1] # Fourier Transform of the Zernike - the global envelope mf = mft.MatrixFourierTransform() ft_zern = mf.perform(Zer, im_size_pastis / sampling, im_size_pastis) elif telescope == 'ATLAST': isolated_zerns = hcipy.make_zernike_basis(num_modes=zern_max, D=real_size_seg, grid=pupil_grid, radial_cutoff=False) Zer = hcipy.Wavefront(mini_seg_hc * isolated_zerns[zernike_pol - 1], wavelength=wvln.to(u.m).value) # Fourier transform the Zernike ft_zern = prop(Zer) #-# Final image if telescope == 'JWST': # Generating the final image that will get passed on to the outer scope, I(u) in eq. 13 intensity = np.abs(ft_zern)**2 * (sum1.value + 2. * sum2.value) elif telescope == 'ATLAST': intensity = ft_zern.intensity.shaped * (sum1.value + 2. * sum2.value) # PASTIS is only valid inside the dark hole, so we cut out only that part if telescope == 'JWST': tot_dh_im_size = sampling * (outer_wa + 3) intensity_zoom = util.zoom_cen( intensity, tot_dh_im_size ) # zoom box is (owa + 3*lambda/D) wide, in terms of lambda/D dh_area_zoom = util.zoom_cen(dh_area, tot_dh_im_size) dh_psf = dh_area_zoom * intensity_zoom elif telescope == 'ATLAST': dh_psf = dh_sz * intensity """ # Create plots. 
plt.subplot(1, 3, 1) plt.imshow(pupil, origin='lower') plt.title('JWST pupil and diameter definition') plt.plot([46.5, 464.5], [101.5, 409.5], 'r-') # show how the diagonal of the pupil is defined plt.subplot(1, 3, 2) plt.imshow(mini_seg, origin='lower') plt.title('JWST individual mini-segment') plt.subplot(1, 3, 3) plt.imshow(dh_psf, origin='lower') plt.title('JWST dark hole') plt.show() """ # dh_psf is the image of the dark hole only, the pixels outside of it are zero # intensity is the entire final image return dh_psf, intensity
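# A compact restatement of how the final image, I(u) in eq. 13 of Leboulleux et al. 2018, is assembled above:
# the squared Zernike envelope multiplies the constant sum of a_k^2 plus twice the A_q-weighted cosine sum.
# Variable names mirror those in analytical_model(); this is illustrative, not a drop-in replacement.
def _pastis_intensity_sketch(ft_zern, coef_nm, generic_coef, cos_u_mat):
    """Illustrative sketch of the final intensity assembly in eq. 13.

    ft_zern:      FT of the local Zernike on one segment (the global envelope)
    coef_nm:      array of per-segment aberration coefficients a_k (single Zernike l)
    generic_coef: array of A_q coefficients, one per non-redundant pair
    cos_u_mat:    cube of cos(b_q . u) terms, shape (ny, nx, NR_pairs_nb)
    """
    sum1 = np.sum(coef_nm**2)                                  # constant term, sum of a_k^2
    sum2 = np.einsum('q,ijq->ij', generic_coef, cos_u_mat)     # weighted cosine sum
    return np.abs(ft_zern)**2 * (sum1 + 2. * sum2)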
# Plot cumulative contrast from E2E simulator, segment-based vs. uniform error budget ppl.plot_cumulative_contrast_compare_allocation( cumulative_opt_e2e, cumulative_e2e, os.path.join(workdir, 'results'), c_target, fname_suffix='segment-based-vs-uniform', save=True) ### Write full PDF report title_page_list = util.collect_title_page(workdir, c_target) util.create_title_page(instrument, workdir, title_page_list) util.create_pdf_report(workdir, c_target) ### DONE log.info(f"All saved in {os.path.join(workdir, 'results')}") log.info('Good job') if __name__ == '__main__': instrument = CONFIG_PASTIS.get('telescope', 'name') run = CONFIG_PASTIS.get('numerical', 'current_analysis') c_target = 1e-10 mc_repeat = 100 run_full_pastis_analysis(instrument, run_choice=run, c_target=c_target, n_repeat=mc_repeat)
import pastis.image_pastis as impastis log = logging.getLogger() try: import webbpsf except ImportError: log.info('WebbPSF was not imported.') if __name__ == '__main__': # Keep track of time start_time = time.time() # runtime currently is around 3 minutes # Parameters outDir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'active', 'calibration') telescope = CONFIG_PASTIS.get('telescope', 'name') fpm = CONFIG_PASTIS.get(telescope, 'focal_plane_mask') # focal plane mask lyot_stop = CONFIG_PASTIS.get(telescope, 'pupil_plane_stop') # Lyot stop filter = CONFIG_PASTIS.get(telescope, 'filter_name') tel_size_px = CONFIG_PASTIS.getint('numerical', 'tel_size_px') im_size_e2e = CONFIG_PASTIS.getint('numerical', 'im_size_px_webbpsf') size_seg = CONFIG_PASTIS.getint('numerical', 'size_seg') nb_seg = CONFIG_PASTIS.getint(telescope, 'nb_subapertures') wss_segs = webbpsf.constants.SEGNAMES_WSS_ORDER zern_max = CONFIG_PASTIS.getint('zernikes', 'max_zern') inner_wa = CONFIG_PASTIS.getint(telescope, 'IWA') outer_wa = CONFIG_PASTIS.getint(telescope, 'OWA') sampling = CONFIG_PASTIS.getfloat(telescope, 'sampling')
def calculate_unaberrated_contrast_and_normalization(instrument, design=None, return_coro_simulator=True, save_coro_floor=False, save_psfs=False, outpath=''): """ Calculate the direct PSF peak and unaberrated coronagraph floor of an instrument. :param instrument: string, 'LUVOIR', 'HiCAT' or 'JWST' :param design: str, optional, default=None, which means we read from the configfile: what coronagraph design to use - 'small', 'medium' or 'large' :param return_coro_simulator: bool, whether to return the coronagraphic simulator as third return, default True :param save: bool, if True, will save direct and coro PSF images to disk, default False :param outpath: string, where to save outputs to if save=True :return: contrast floor and PSF normalization factor, and optionally (by default) the simulator in coron mode """ if instrument == 'LUVOIR': # Instantiate LuvoirAPLC class sampling = CONFIG_PASTIS.getfloat(instrument, 'sampling') optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path') if design is None: design = CONFIG_PASTIS.get('LUVOIR', 'coronagraph_design') luvoir = LuvoirAPLC(optics_input, design, sampling) # Calculate reference images for contrast normalization and coronagraph floor unaberrated_coro_psf, direct = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate=False) norm = np.max(direct) direct_psf = direct.shaped coro_psf = unaberrated_coro_psf.shaped / norm # Return the coronagraphic simulator and DH mask coro_simulator = luvoir dh_mask = luvoir.dh_mask.shaped if instrument == 'HiCAT': # Set up HiCAT simulator in correct state hicat_sim = set_up_hicat(apply_continuous_dm_maps=True) # Calculate direct reference images for contrast normalization hicat_sim.include_fpm = False direct = hicat_sim.calc_psf() direct_psf = direct[0].data norm = direct_psf.max() # Calculate unaberrated coronagraph image for contrast floor hicat_sim.include_fpm = True coro_image = hicat_sim.calc_psf() coro_psf = coro_image[0].data / norm iwa = CONFIG_PASTIS.getfloat('HiCAT', 'IWA') owa = CONFIG_PASTIS.getfloat('HiCAT', 'OWA') sampling = CONFIG_PASTIS.getfloat('HiCAT', 'sampling') dh_mask = util.create_dark_hole(coro_psf, iwa, owa, sampling).astype('bool') # Return the coronagraphic simulator coro_simulator = hicat_sim if instrument == 'JWST': # Instantiate NIRCAM object jwst_sim = webbpsf_imaging.set_up_nircam() # this returns a tuple of two: jwst_sim[0] is the nircam object, jwst_sim[1] its ote # Calculate direct reference images for contrast normalization jwst_sim[0].image_mask = None direct = jwst_sim[0].calc_psf(nlambda=1) direct_psf = direct[0].data norm = direct_psf.max() # Calculate unaberrated coronagraph image for contrast floor jwst_sim[0].image_mask = CONFIG_PASTIS.get('JWST', 'focal_plane_mask') coro_image = jwst_sim[0].calc_psf(nlambda=1) coro_psf = coro_image[0].data / norm iwa = CONFIG_PASTIS.getfloat('JWST', 'IWA') owa = CONFIG_PASTIS.getfloat('JWST', 'OWA') sampling = CONFIG_PASTIS.getfloat('JWST', 'sampling') dh_mask = util.create_dark_hole(coro_psf, iwa, owa, sampling).astype('bool') # Return the coronagraphic simulator (a tuple in the JWST case!) 
coro_simulator = jwst_sim # Calculate coronagraph floor in dark hole contrast_floor = util.dh_mean(coro_psf, dh_mask) log.info(f'contrast floor: {contrast_floor}') if save_coro_floor: # Save contrast floor to text file with open(os.path.join(outpath, 'coronagraph_floor.txt'), 'w') as file: file.write(f'Coronagraph floor: {contrast_floor}') if save_psfs: # Save direct PSF, unaberrated coro PSF and DH masked coro PSF as PDF plt.figure(figsize=(18, 6)) plt.subplot(1, 3, 1) plt.title("Direct PSF") plt.imshow(direct_psf, norm=LogNorm()) plt.colorbar() plt.subplot(1, 3, 2) plt.title("Unaberrated coro PSF") plt.imshow(coro_psf, norm=LogNorm()) plt.colorbar() plt.subplot(1, 3, 3) plt.title("Dark hole coro PSF") plt.imshow(np.ma.masked_where(~dh_mask, coro_psf), norm=LogNorm()) plt.colorbar() plt.savefig(os.path.join(outpath, 'unaberrated_dh.pdf')) if return_coro_simulator: return contrast_floor, norm, coro_simulator else: return contrast_floor, norm
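# A short usage example of the function above, based only on its signature and return values; the output
# path is a placeholder and would normally point into the current data directory.
def _example_contrast_floor_call():
    """Illustrative sketch: LUVOIR branch with the 'small' APLC design."""
    contrast_floor, norm, luvoir_sim = calculate_unaberrated_contrast_and_normalization(
        'LUVOIR', design='small', return_coro_simulator=True,
        save_coro_floor=True, save_psfs=True, outpath='/path/to/results')
    log.info(f'Coronagraph floor: {contrast_floor}, direct PSF peak: {norm}')
    return contrast_floor, norm, luvoir_sim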
def num_matrix_jwst(): """ Generate a numerical PASTIS matrix for a JWST coronagraph. -- Depracated function, the LUVOIR PASTIS matrix is better calculated with num_matrix_multiprocess(), which can do this for your choice of one of the implemented instruments (LUVOIR, HiCAT, JWST). -- All inputs are read from the (local) configfile and saved to the specified output directory. """ import webbpsf from e2e_simulators import webbpsf_imaging as webbim # Set WebbPSF environment variable os.environ['WEBBPSF_PATH'] = CONFIG_PASTIS.get('local', 'webbpsf_data_path') # Keep track of time start_time = time.time() # runtime is currently around 21 minutes log.info('Building numerical matrix for JWST\n') # Parameters overall_dir = util.create_data_path(CONFIG_PASTIS.get('local', 'local_data_path'), telescope='jwst') resDir = os.path.join(overall_dir, 'matrix_numerical') which_tel = CONFIG_PASTIS.get('telescope', 'name') nb_seg = CONFIG_PASTIS.getint(which_tel, 'nb_subapertures') im_size_e2e = CONFIG_PASTIS.getint('numerical', 'im_size_px_webbpsf') inner_wa = CONFIG_PASTIS.getint(which_tel, 'IWA') outer_wa = CONFIG_PASTIS.getint(which_tel, 'OWA') sampling = CONFIG_PASTIS.getfloat(which_tel, 'sampling') fpm = CONFIG_PASTIS.get(which_tel, 'focal_plane_mask') # focal plane mask lyot_stop = CONFIG_PASTIS.get(which_tel, 'pupil_plane_stop') # Lyot stop filter = CONFIG_PASTIS.get(which_tel, 'filter_name') wfe_aber = CONFIG_PASTIS.getfloat(which_tel, 'calibration_aberration') * u.nm wss_segs = webbpsf.constants.SEGNAMES_WSS_ORDER zern_max = CONFIG_PASTIS.getint('zernikes', 'max_zern') zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike') zern_mode = util.ZernikeMode(zern_number) # Create Zernike mode object for easier handling wss_zern_nb = util.noll_to_wss(zern_number) # Convert from Noll to WSS framework # Create necessary directories if they don't exist yet os.makedirs(overall_dir, exist_ok=True) os.makedirs(resDir, exist_ok=True) os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True) os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True) os.makedirs(os.path.join(resDir, 'darkholes'), exist_ok=True) # Create the dark hole mask. pup_im = np.zeros([im_size_e2e, im_size_e2e]) # this is just used for DH mask generation dh_area = util.create_dark_hole(pup_im, inner_wa, outer_wa, sampling) # Create a direct WebbPSF image for normalization factor fake_aber = np.zeros([nb_seg, zern_max]) psf_perfect = webbim.nircam_nocoro(filter, fake_aber) normp = np.max(psf_perfect) psf_perfect = psf_perfect / normp # Set up NIRCam coro object from WebbPSF nc_coro = webbpsf.NIRCam() nc_coro.filter = filter nc_coro.image_mask = fpm nc_coro.pupil_mask = lyot_stop # Null the OTE OPDs for the PSFs, maybe we will add internal WFE later. nc_coro, ote_coro = webbpsf.enable_adjustable_ote(nc_coro) # create OTE for coronagraph nc_coro.include_si_wfe = False # set SI internal WFE to zero #-# Generating the PASTIS matrix and a list for all contrasts contrast_matrix = np.zeros([nb_seg, nb_seg]) # Generate empty matrix all_psfs = [] all_dhs = [] all_contrasts = [] log.info(f'wfe_aber: {wfe_aber}') for i in range(nb_seg): for j in range(nb_seg): log.info(f'\nSTEP: {i+1}-{j+1} / {nb_seg}-{nb_seg}') # Get names of segments, they're being addressed by their names in the ote functions. seg_i = wss_segs[i].split('-')[0] seg_j = wss_segs[j].split('-')[0] # Put the aberration on the correct segments Aber_WSS = np.zeros([nb_seg, zern_max]) # The Zernikes here will be filled in the WSS order!!! 
# Because it goes into _apply_hexikes_to_seg(). Aber_WSS[i, wss_zern_nb - 1] = wfe_aber.to(u.m).value # Aberration on the segment we're currently working on; # convert to meters; -1 on the Zernike because Python starts # numbering at 0. Aber_WSS[j, wss_zern_nb - 1] = wfe_aber.to(u.m).value # same for other segment # Putting aberrations on segments i and j ote_coro.reset() # Making sure there are no previous movements on the segments. ote_coro.zero() # set OTE for coronagraph to zero # Apply both aberrations to OTE. If i=j, apply only once! ote_coro._apply_hexikes_to_seg(seg_i, Aber_WSS[i, :]) # set segment i (segment numbering starts at 1) if i != j: ote_coro._apply_hexikes_to_seg(seg_j, Aber_WSS[j, :]) # set segment j # If you want to display it: # ote_coro.display_opd() # plt.show() # Save OPD images for testing opd_name = f'opd_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}' plt.clf() ote_coro.display_opd() plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf')) log.info('Calculating WebbPSF image') image = nc_coro.calc_psf(fov_pixels=int(im_size_e2e), oversample=1, nlambda=1) psf = image[0].data / normp # Save WebbPSF image to disk filename_psf = f'psf_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}' util.write_fits(psf, os.path.join(resDir, 'psfs', filename_psf + '.fits'), header=None, metadata=None) all_psfs.append(psf) log.info('Calculating mean contrast in dark hole') dh_intensity = psf * dh_area contrast = np.mean(dh_intensity[np.where(dh_intensity != 0)]) log.info(f'contrast: {contrast}') # Save DH image to disk and put current contrast in list filename_dh = f'dh_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}' util.write_fits(dh_intensity, os.path.join(resDir, 'darkholes', filename_dh + '.fits'), header=None, metadata=None) all_dhs.append(dh_intensity) all_contrasts.append(contrast) # Fill according entry in the matrix contrast_matrix[i,j] = contrast # Transform saved lists to arrays all_psfs = np.array(all_psfs) all_dhs = np.array(all_dhs) all_contrasts = np.array(all_contrasts) # Filling the off-axis elements matrix_two_N = np.copy(contrast_matrix) # This is just an intermediary copy so that I don't mix things up. matrix_pastis = np.copy(contrast_matrix) # This will be the final PASTIS matrix. for i in range(nb_seg): for j in range(nb_seg): if i != j: matrix_off_val = (matrix_two_N[i,j] - matrix_two_N[i,i] - matrix_two_N[j,j]) / 2. matrix_pastis[i,j] = matrix_off_val log.info(f'Off-axis for i{i+1}-j{j+1}: {matrix_off_val}') # Normalize matrix for the input aberration matrix_pastis /= np.square(wfe_aber.value) # Save matrix to file filename_matrix = f'PASTISmatrix_num_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}' util.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits'), header=None, metadata=None) log.info(f'Matrix saved to: {os.path.join(resDir, filename_matrix + ".fits")}') # Save the PSF and DH image *cubes* as well (as opposed to each one individually) util.write_fits(all_psfs, os.path.join(resDir, 'psfs', 'psf_cube.fits'), header=None, metadata=None) util.write_fits(all_dhs, os.path.join(resDir, 'darkholes', 'dh_cube.fits'), header=None, metadata=None) np.savetxt(os.path.join(resDir, 'pair-wise_contrasts.txt'), all_contrasts, fmt='%e') # Tell us how long it took to finish. 
end_time = time.time() log.info(f'Runtime for matrix_building.py: {end_time - start_time}sec = {(end_time - start_time) / 60}min') log.info(f'Data saved to {resDir}')
def num_matrix_luvoir(design, savepsfs=False, saveopds=True): """ Generate a numerical PASTIS matrix for a LUVOIR A coronagraph. -- Depracated function, the LUVOIR PASTIS matrix is better calculated with num_matrix_multiprocess(), which can do this for your choice of one of the implemented instruments (LUVOIR, HiCAT, JWST). -- All inputs are read from the (local) configfile and saved to the specified output directory. The LUVOIR STDT delivery in May 2018 included three different apodizers we can work with, you pick which of the three you want with the 'design' parameter. :param design: string, what coronagraph design to use - 'small', 'medium' or 'large' :param savepsfs: bool, if True, all PSFs will be saved to disk individually, as fits files, additionally to the total PSF cube. If False, the total cube will still get saved at the very end of the script. :param saveopds: bool, if True, all pupil surface maps of aberrated segment pairs will be saved to disk as PDF :return overall_dir: string, experiment directory """ # Keep track of time start_time = time.time() ### Parameters # System parameters overall_dir = util.create_data_path(CONFIG_PASTIS.get('local', 'local_data_path'), telescope='luvoir-'+design) os.makedirs(overall_dir, exist_ok=True) resDir = os.path.join(overall_dir, 'matrix_numerical') # Create necessary directories if they don't exist yet os.makedirs(resDir, exist_ok=True) os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True) os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True) # Set up logger util.setup_pastis_logging(resDir, f'pastis_matrix_{design}') log.info('Building numerical matrix for LUVOIR\n') # Read calibration aberration zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike') zern_mode = util.ZernikeMode(zern_number) # Create Zernike mode object for easier handling # General telescope parameters nb_seg = CONFIG_PASTIS.getint('LUVOIR', 'nb_subapertures') wvln = CONFIG_PASTIS.getfloat('LUVOIR', 'lambda') * 1e-9 # m diam = CONFIG_PASTIS.getfloat('LUVOIR', 'diameter') # m wfe_aber = CONFIG_PASTIS.getfloat('LUVOIR', 'calibration_aberration') * 1e-9 # m # Image system parameters sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling') # Record some of the defined parameters log.info(f'LUVOIR apodizer design: {design}') log.info(f'Wavelength: {wvln} m') log.info(f'Telescope diameter: {diam} m') log.info(f'Number of segments: {nb_seg}') log.info(f'Sampling: {sampling} px per lambda/D') log.info(f'wfe_aber: {wfe_aber} m') # Copy configfile to resulting matrix directory util.copy_config(resDir) ### Instantiate Luvoir telescope with chosen apodizer design optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path') luvoir = LuvoirAPLC(optics_input, design, sampling) ### Reference images for contrast normalization and coronagraph floor unaberrated_coro_psf, ref = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate=False) norm = np.max(ref) dh_intensity = (unaberrated_coro_psf / norm) * luvoir.dh_mask contrast_floor = np.mean(dh_intensity[np.where(luvoir.dh_mask != 0)]) log.info(f'contrast floor: {contrast_floor}') ### Generating the PASTIS matrix and a list for all contrasts contrast_matrix = np.zeros([nb_seg, nb_seg]) # Generate empty matrix all_psfs = [] all_contrasts = [] for i in range(nb_seg): for j in range(nb_seg): log.info(f'\nSTEP: {i+1}-{j+1} / {nb_seg}-{nb_seg}') # Put aberration on correct segments. If i=j, apply only once! 
luvoir.flatten() luvoir.set_segment(i+1, wfe_aber/2, 0, 0) if i != j: luvoir.set_segment(j+1, wfe_aber/2, 0, 0) log.info('Calculating coro image...') image, inter = luvoir.calc_psf(ref=False, display_intermediate=False, return_intermediate='intensity') # Normalize PSF by reference image psf = image / norm all_psfs.append(psf.shaped) # Save image to disk if savepsfs: # TODO: I might want to change this to matplotlib images since I save the PSF cube anyway. filename_psf = f'psf_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}' hcipy.write_fits(psf, os.path.join(resDir, 'psfs', filename_psf + '.fits')) # Save OPD images for testing if saveopds: opd_name = f'opd_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}' plt.clf() hcipy.imshow_field(inter['seg_mirror'], mask=luvoir.aperture, cmap='RdBu') plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf')) log.info('Calculating mean contrast in dark hole') dh_intensity = psf * luvoir.dh_mask contrast = np.mean(dh_intensity[np.where(luvoir.dh_mask != 0)]) log.info(f'contrast: {float(contrast)}') # contrast is a Field, here casting to normal float all_contrasts.append(contrast) # Fill according entry in the matrix and subtract baseline contrast contrast_matrix[i,j] = contrast - contrast_floor # Transform saved lists to arrays all_psfs = np.array(all_psfs) all_contrasts = np.array(all_contrasts) # Save the PSF image *cube* as well (as opposed to each one individually) hcipy.write_fits(all_psfs, os.path.join(resDir, 'psfs', 'psf_cube.fits'),) np.savetxt(os.path.join(resDir, 'pair-wise_contrasts.txt'), all_contrasts, fmt='%e') # Filling the off-axis elements log.info('\nCalculating off-axis matrix elements...') matrix_two_N = np.copy(contrast_matrix) # This is just an intermediary copy so that I don't mix things up. matrix_pastis = np.copy(contrast_matrix) # This will be the final PASTIS matrix. for i in range(nb_seg): for j in range(nb_seg): if i != j: matrix_off_val = (matrix_two_N[i,j] - matrix_two_N[i,i] - matrix_two_N[j,j]) / 2. matrix_pastis[i,j] = matrix_off_val log.info(f'Off-axis for i{i+1}-j{j+1}: {matrix_off_val}') # Normalize matrix for the input aberration - this defines what units the PASTIS matrix will be in. The PASTIS # matrix propagation function (util.pastis_contrast()) then needs to take in the aberration vector in these same # units. I have chosen to keep this to 1nm, so, we normalize the PASTIS matrix to units of nanometers. matrix_pastis /= np.square(wfe_aber * 1e9) # 1e9 converts the calibration aberration back to nanometers # Save matrix to file filename_matrix = f'PASTISmatrix_num_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}' hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits')) log.info(f'Matrix saved to: {os.path.join(resDir, filename_matrix + ".fits")}') # Tell us how long it took to finish. end_time = time.time() log.info(f'Runtime for matrix_building.py: {end_time - start_time}sec = {(end_time - start_time) / 60}min') log.info(f'Data saved to {resDir}') return overall_dir
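# The normalization above fixes the PASTIS matrix units to nanometers of WFE per segment, which is what the
# matrix propagation (util.pastis_contrast()) then assumes. A minimal sketch of that propagation, i.e. the
# quadratic PASTIS relation c = c_0 + a^T M a; this only illustrates the formula and units, it is not the
# repository's own helper.
def _predict_contrast_from_matrix_sketch(matrix_pastis, aber_nm, contrast_floor):
    """Illustrative sketch: mean dark-hole contrast from a segment aberration vector in nm."""
    return contrast_floor + np.dot(aber_nm, np.dot(matrix_pastis, aber_nm))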
def run_full_pastis_analysis(instrument, run_choice, design=None, c_target=1e-10, n_repeat=100): """ Run a full PASTIS analysis on a given PASTIS matrix. The first couple of lines contain switches to turn different parts of the analysis on and off. These include: 1. calculating the PASTIS modes 2. calculating the PASTIS mode weights sigma under assumption of a uniform contrast allocation across all modes 3. running an E2E Monte Carlo simulation on the modes with their weights sigma from the uniform contrast allocation 4. calculating a cumulative contrast plot from the sigmas of the uniform contrast allocation 5. calculating the segment constraints mu under assumption of uniform statistical contrast contribution across segments 6. running an E2E Monte Carlo simulation on the segments with their weights mu 7. calculating the segment- and mode-space covariance matrices Ca and Cb 8. analytically calculating the statistical mean contrast and its variance 9. calculting segment-based error budget :param instrument: str, "LUVOIR", "HiCAT" or "JWST" :param run_choice: str, path to data and where outputs will be saved :param design: str, optional, default=None, which means we read from the configfile (if running for LUVOIR): what coronagraph design to use - 'small', 'medium' or 'large' :param c_target: float, target contrast :param n_repeat: number of realizations in both Monte Carlo simulations (modes and segments), default=100 """ # Which parts are we running? calculate_modes = True calculate_sigmas = True run_monte_carlo_modes = True calc_cumulative_contrast = True calculate_mus = True run_monte_carlo_segments = True calculate_covariance_matrices = True analytical_statistics = True calculate_segment_based = True # Data directory workdir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), run_choice) nseg = CONFIG_PASTIS.getint(instrument, 'nb_subapertures') wvln = CONFIG_PASTIS.getfloat(instrument, 'lambda') * 1e-9 # [m] log.info('Setting up optics...') log.info(f'Data folder: {workdir}') log.info(f'Instrument: {instrument}') # Set up simulator, calculate reference PSF and dark hole mask # TODO: replace this section with calculate_unaberrated_contrast_and_normalization(). This will require to save out # reference and unaberrated coronagraphic PSF already in matrix generation. 
    if instrument == 'LUVOIR':
        if design is None:
            design = CONFIG_PASTIS.get('LUVOIR', 'coronagraph_design')
        log.info(f'Coronagraph design: {design}')

        sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling')
        optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path')
        luvoir = LuvoirAPLC(optics_input, design, sampling)

        # Generate reference PSF and unaberrated coronagraphic image
        luvoir.flatten()
        psf_unaber, ref = luvoir.calc_psf(ref=True, display_intermediate=False)
        norm = ref.max()
        psf_unaber = psf_unaber.shaped / norm
        dh_mask = luvoir.dh_mask.shaped
        sim_instance = luvoir

    if instrument == 'HiCAT':
        hicat_sim = set_up_hicat(apply_continuous_dm_maps=True)

        # Generate reference PSF and unaberrated coronagraphic image
        hicat_sim.include_fpm = False
        direct = hicat_sim.calc_psf()
        norm = direct[0].data.max()

        hicat_sim.include_fpm = True
        coro_image = hicat_sim.calc_psf()
        psf_unaber = coro_image[0].data / norm

        # Create DH mask
        iwa = CONFIG_PASTIS.getfloat('HiCAT', 'IWA')
        owa = CONFIG_PASTIS.getfloat('HiCAT', 'OWA')
        sampling = CONFIG_PASTIS.getfloat('HiCAT', 'sampling')
        dh_mask = util.create_dark_hole(psf_unaber, iwa, owa, sampling).astype('bool')
        sim_instance = hicat_sim

    if instrument == 'JWST':
        jwst_sim = webbpsf_imaging.set_up_nircam()   # this returns a tuple of two: jwst_sim[0] is the nircam object, jwst_sim[1] its ote

        # Generate reference PSF and unaberrated coronagraphic image
        jwst_sim[0].image_mask = None
        direct = jwst_sim[0].calc_psf(nlambda=1)
        direct_psf = direct[0].data
        norm = direct_psf.max()

        jwst_sim[0].image_mask = CONFIG_PASTIS.get('JWST', 'focal_plane_mask')
        coro_image = jwst_sim[0].calc_psf(nlambda=1)
        psf_unaber = coro_image[0].data / norm

        # Create DH mask
        iwa = CONFIG_PASTIS.getfloat('JWST', 'IWA')
        owa = CONFIG_PASTIS.getfloat('JWST', 'OWA')
        sampling = CONFIG_PASTIS.getfloat('JWST', 'sampling')
        dh_mask = util.create_dark_hole(psf_unaber, iwa, owa, sampling).astype('bool')
        sim_instance = jwst_sim

    # TODO: this would also be part of the refactor mentioned above
    # Calculate coronagraph contrast floor
    coro_floor = util.dh_mean(psf_unaber, dh_mask)
    log.info(f'Coronagraph floor: {coro_floor}')

    # Read the PASTIS matrix
    matrix = fits.getdata(os.path.join(workdir, 'matrix_numerical', 'PASTISmatrix_num_piston_Noll1.fits'))

    ### Calculate PASTIS modes and singular values/eigenvalues
    if calculate_modes:
        log.info('Calculating all PASTIS modes')
        pmodes, svals = modes_from_matrix(instrument, workdir)

        ### Get full 2D modes and save them
        mode_cube = full_modes_from_themselves(instrument, pmodes, workdir, sim_instance, saving=True)

    else:
        log.info(f'Reading PASTIS modes from {workdir}')
        pmodes, svals = modes_from_file(workdir)

    ### Calculate mode-based static constraints
    if calculate_sigmas:
        log.info('Calculating static sigmas')
        sigmas = calculate_sigma(c_target, nseg, svals, coro_floor)
        np.savetxt(os.path.join(workdir, 'results', f'mode_requirements_{c_target}_uniform.txt'), sigmas)

        # Plot static mode constraints
        ppl.plot_mode_weights_simple(sigmas, wvln, out_dir=os.path.join(workdir, 'results'), c_target=c_target,
                                     fname_suffix='uniform', save=True)

    else:
        log.info(f'Reading sigmas from {workdir}')
        sigmas = np.loadtxt(os.path.join(workdir, 'results', f'mode_requirements_{c_target}_uniform.txt'))

    ### Calculate Monte Carlo simulation for sigmas, with E2E
    if run_monte_carlo_modes:
        log.info('\nRunning Monte Carlo simulation for modes')
        # Keep track of time
        start_monte_carlo_modes = time.time()

        all_contr_rand_modes = []
        all_random_weight_sets = []
        for rep in range(n_repeat):
            log.info(f'Mode realization {rep + 1}/{n_repeat}')
            random_weights, one_contrast_mode = calc_random_mode_configurations(instrument, pmodes, sim_instance, sigmas, dh_mask, norm)
            all_random_weight_sets.append(random_weights)
            all_contr_rand_modes.append(one_contrast_mode)

        # Empirical mean and standard deviation of the distribution
        mean_modes = np.mean(all_contr_rand_modes)
        stddev_modes = np.std(all_contr_rand_modes)
        log.info(f'Mean of the Monte Carlo result modes: {mean_modes}')
        log.info(f'Standard deviation of the Monte Carlo result modes: {stddev_modes}')
        end_monte_carlo_modes = time.time()

        # Save Monte Carlo simulation
        np.savetxt(os.path.join(workdir, 'results', f'mc_mode_reqs_{c_target}.txt'), all_random_weight_sets)
        np.savetxt(os.path.join(workdir, 'results', f'mc_modes_contrasts_{c_target}.txt'), all_contr_rand_modes)

        ppl.plot_monte_carlo_simulation(all_contr_rand_modes, out_dir=os.path.join(workdir, 'results'),
                                        c_target=c_target, segments=False, stddev=stddev_modes, save=True)

    ### Calculate cumulative contrast plot with E2E simulator and matrix product
    if calc_cumulative_contrast:
        log.info('Calculating cumulative contrast plot, uniform contrast across all modes')
        cumulative_e2e = cumulative_contrast_e2e(instrument, pmodes, sigmas, sim_instance, dh_mask, norm)
        cumulative_pastis = cumulative_contrast_matrix(pmodes, sigmas, matrix, coro_floor)

        np.savetxt(os.path.join(workdir, 'results', f'cumul_contrast_accuracy_e2e_{c_target}.txt'), cumulative_e2e)
        np.savetxt(os.path.join(workdir, 'results', f'cumul_contrast_accuracy_pastis_{c_target}.txt'), cumulative_pastis)

        # Plot the cumulative contrast from E2E simulator and matrix
        ppl.plot_cumulative_contrast_compare_accuracy(cumulative_pastis, cumulative_e2e,
                                                      out_dir=os.path.join(workdir, 'results'),
                                                      c_target=c_target, save=True)

    else:
        log.info('Loading uniform cumulative contrast from disk.')
        cumulative_e2e = np.loadtxt(os.path.join(workdir, 'results', f'cumul_contrast_accuracy_e2e_{c_target}.txt'))

    ### Calculate segment-based static constraints
    if calculate_mus:
        log.info('Calculating segment-based constraints')
        mus = calculate_segment_constraints(pmodes, matrix, c_target, coro_floor)
        np.savetxt(os.path.join(workdir, 'results', f'segment_requirements_{c_target}.txt'), mus)

        ppl.plot_segment_weights(mus, out_dir=os.path.join(workdir, 'results'), c_target=c_target, save=True)
        ppl.plot_mu_map(instrument, mus, sim_instance, out_dir=os.path.join(workdir, 'results'), c_target=c_target, save=True)

        # Apply mu map directly and run through E2E simulator
        mus *= u.nm

        if instrument == 'LUVOIR':
            sim_instance.flatten()
            for seg, mu in enumerate(mus):
                sim_instance.set_segment(seg + 1, mu.to(u.m).value / 2, 0, 0)
            im_data = sim_instance.calc_psf()
            psf_pure_mu_map = im_data.shaped

        if instrument == 'HiCAT':
            sim_instance.iris_dm.flatten()
            for seg, mu in enumerate(mus):
                sim_instance.iris_dm.set_actuator(seg, mu / 1e9, 0, 0)   # /1e9 converts to meters
            im_data = sim_instance.calc_psf()
            psf_pure_mu_map = im_data[0].data

        if instrument == 'JWST':
            sim_instance[1].zero()
            for seg, mu in enumerate(mus):
                seg_num = webbpsf_imaging.WSS_SEGS[seg].split('-')[0]
                sim_instance[1].move_seg_local(seg_num, piston=mu.value, trans_unit='nm')
            im_data = sim_instance[0].calc_psf(nlambda=1)
            psf_pure_mu_map = im_data[0].data

        contrast_mu = util.dh_mean(psf_pure_mu_map / norm, dh_mask)
        log.info(f'Contrast with pure mu-map: {contrast_mu}')

    else:
        log.info(f'Reading mus from {workdir}')
        mus = np.loadtxt(os.path.join(workdir, 'results', f'segment_requirements_{c_target}.txt'))
        mus *= u.nm

    ### Calculate Monte Carlo confirmation for segments, with E2E
    if run_monte_carlo_segments:
        log.info('\nRunning Monte Carlo simulation for segments')
        # Keep track of time
        start_monte_carlo_seg = time.time()

        all_contr_rand_seg = []
        all_random_maps = []
        for rep in range(n_repeat):
            log.info(f'Segment realization {rep + 1}/{n_repeat}')
            random_map, one_contrast_seg = calc_random_segment_configuration(instrument, sim_instance, mus, dh_mask, norm)
            all_random_maps.append(random_map)
            all_contr_rand_seg.append(one_contrast_seg)

        # Empirical mean and standard deviation of the distribution
        mean_segments = np.mean(all_contr_rand_seg)
        stddev_segments = np.std(all_contr_rand_seg)
        log.info(f'Mean of the Monte Carlo result segments: {mean_segments}')
        log.info(f'Standard deviation of the Monte Carlo result segments: {stddev_segments}')

        with open(os.path.join(workdir, 'results', f'statistical_contrast_empirical_{c_target}.txt'), 'w') as file:
            file.write(f'Empirical, statistical mean: {mean_segments}')
            file.write(f'\nEmpirical variance: {stddev_segments**2}')
        end_monte_carlo_seg = time.time()

        log.info('\nRuntimes:')
        log.info('Monte Carlo on segments with {} iterations: {} sec = {} min = {} h'.format(
            n_repeat, end_monte_carlo_seg - start_monte_carlo_seg,
            (end_monte_carlo_seg - start_monte_carlo_seg) / 60,
            (end_monte_carlo_seg - start_monte_carlo_seg) / 3600))

        # Save Monte Carlo simulation
        np.savetxt(os.path.join(workdir, 'results', f'mc_segment_req_maps_{c_target}.txt'), all_random_maps)   # in m
        np.savetxt(os.path.join(workdir, 'results', f'mc_segments_contrasts_{c_target}.txt'), all_contr_rand_seg)

        ppl.plot_monte_carlo_simulation(all_contr_rand_seg, out_dir=os.path.join(workdir, 'results'),
                                        c_target=c_target, segments=True, stddev=stddev_segments, save=True)

    ### Calculate covariance matrices
    if calculate_covariance_matrices:
        log.info('Calculating covariance matrices')
        Ca = np.diag(np.square(mus.value))
        hcipy.write_fits(Ca, os.path.join(workdir, 'results', f'cov_matrix_segments_Ca_{c_target}_segment-based.fits'))

        Cb = np.dot(np.transpose(pmodes), np.dot(Ca, pmodes))
        hcipy.write_fits(Cb, os.path.join(workdir, 'results', f'cov_matrix_modes_Cb_{c_target}_segment-based.fits'))

        ppl.plot_covariance_matrix(Ca, os.path.join(workdir, 'results'), c_target, segment_space=True,
                                   fname_suffix='segment-based', save=True)
        ppl.plot_covariance_matrix(Cb, os.path.join(workdir, 'results'), c_target, segment_space=False,
                                   fname_suffix='segment-based', save=True)

    else:
        log.info('Loading covariance matrices from disk.')
        Ca = fits.getdata(os.path.join(workdir, 'results', f'cov_matrix_segments_Ca_{c_target}_segment-based.fits'))
        Cb = fits.getdata(os.path.join(workdir, 'results', f'cov_matrix_modes_Cb_{c_target}_segment-based.fits'))

    ### Analytically calculate statistical mean contrast and its variance
    if analytical_statistics:
        log.info('Calculating analytical statistics.')
        mean_stat_c = util.calc_statistical_mean_contrast(matrix, Ca, coro_floor)
        var_c = util.calc_variance_of_mean_contrast(matrix, Ca)
        log.info(f'Analytical statistical mean: {mean_stat_c}')
        log.info(f'Analytical standard deviation: {np.sqrt(var_c)}')

        with open(os.path.join(workdir, 'results', f'statistical_contrast_analytical_{c_target}.txt'), 'w') as file:
            file.write(f'Analytical, statistical mean: {mean_stat_c}')
            file.write(f'\nAnalytical variance: {var_c}')

    ### Calculate segment-based error budget
    if calculate_segment_based:
        log.info('Calculating segment-based error budget.')

        # Extract segment-based mode weights
        log.info('Calculate segment-based mode weights')
        sigmas_opt = np.sqrt(np.diag(Cb))
        np.savetxt(os.path.join(workdir, 'results', f'mode_requirements_{c_target}_segment-based.txt'), sigmas_opt)
        ppl.plot_mode_weights_simple(sigmas_opt, wvln, out_dir=os.path.join(workdir, 'results'), c_target=c_target,
                                     fname_suffix='segment-based', save=True)
        ppl.plot_mode_weights_double_axis((sigmas, sigmas_opt), wvln, os.path.join(workdir, 'results'), c_target,
                                          fname_suffix='segment-based-vs-uniform',
                                          labels=('Uniform error budget', 'Segment-based error budget'),
                                          alphas=(0.5, 1.), linestyles=('--', '-'), colors=('k', 'r'), save=True)

        # Calculate contrast per mode
        log.info('Calculating contrast per mode')
        per_mode_opt_e2e = cumulative_contrast_e2e(instrument, pmodes, sigmas_opt, sim_instance, dh_mask, norm, individual=True)
        np.savetxt(os.path.join(workdir, 'results', f'contrast_per_mode_{c_target}_e2e_segment-based.txt'), per_mode_opt_e2e)
        ppl.plot_contrast_per_mode(per_mode_opt_e2e, coro_floor, c_target, pmodes.shape[0],
                                   os.path.join(workdir, 'results'), save=True)

        # Calculate segment-based cumulative contrast
        log.info('Calculating segment-based cumulative contrast')
        cumulative_opt_e2e = cumulative_contrast_e2e(instrument, pmodes, sigmas_opt, sim_instance, dh_mask, norm)
        np.savetxt(os.path.join(workdir, 'results', f'cumul_contrast_allocation_e2e_{c_target}_segment-based.txt'), cumulative_opt_e2e)

        # Plot cumulative contrast from E2E simulator, segment-based vs. uniform error budget
        ppl.plot_cumulative_contrast_compare_allocation(cumulative_opt_e2e, cumulative_e2e, os.path.join(workdir, 'results'),
                                                        c_target, fname_suffix='segment-based-vs-uniform', save=True)

    ### Write full PDF report
    title_page_list = util.collect_title_page(workdir, c_target)
    util.create_title_page(instrument, workdir, title_page_list)
    util.create_pdf_report(workdir, c_target)

    ### DONE
    log.info(f"All saved in {os.path.join(workdir, 'results')}")
    log.info('Good job')
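
# --- Hypothetical usage sketch, not part of the original module: running the full analysis on an existing PASTIS
# matrix. The run directory name below is made up for illustration; run_choice is the folder under the configured
# local_data_path that already holds matrix_numerical/PASTISmatrix_num_piston_Noll1.fits. The helper name is invented
# and the function is never called from within this module.
def _example_run_full_analysis():
    run_full_pastis_analysis(instrument='LUVOIR',
                             run_choice='2021-05-01T12-00-00_luvoir-small',   # hypothetical existing data folder
                             design='small',
                             c_target=1e-10,
                             n_repeat=100)
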
def num_matrix_multiprocess(instrument, design=None, savepsfs=True, saveopds=True):
    """
    Generate a numerical/semi-analytical PASTIS matrix.

    Multiprocessed script to calculate the PASTIS matrix. Implementation adapted from
    hicat.scripts.stroke_minimization.calculate_jacobian.

    :param instrument: str, what instrument (LUVOIR, HiCAT, JWST) to generate the PASTIS matrix for
    :param design: str, optional, default=None, which means we read from the configfile:
                   what coronagraph design to use - 'small', 'medium' or 'large'
    :param savepsfs: bool, if True, all PSFs will be saved to disk individually, as fits files.
    :param saveopds: bool, if True, all pupil surface maps of aberrated segment pairs will be saved to disk as PDF
    :return: overall_dir: string, experiment directory
    """

    # Keep track of time
    start_time = time.time()   # runtime is currently around 150 minutes

    ### Parameters

    # Create directory names
    tel_suffix = f'{instrument.lower()}'
    if instrument == 'LUVOIR':
        if design is None:
            design = CONFIG_PASTIS.get('LUVOIR', 'coronagraph_design')
        tel_suffix += f'-{design}'
    overall_dir = util.create_data_path(CONFIG_PASTIS.get('local', 'local_data_path'), telescope=tel_suffix)
    os.makedirs(overall_dir, exist_ok=True)
    resDir = os.path.join(overall_dir, 'matrix_numerical')

    # Create necessary directories if they don't exist yet
    os.makedirs(resDir, exist_ok=True)
    os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True)
    os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True)

    # Set up logger
    util.setup_pastis_logging(resDir, f'pastis_matrix_{tel_suffix}')
    log.info(f'Building numerical matrix for {tel_suffix}\n')

    # Read calibration aberration
    zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike')
    zern_mode = util.ZernikeMode(zern_number)   # Create Zernike mode object for easier handling

    # General telescope parameters
    nb_seg = CONFIG_PASTIS.getint(instrument, 'nb_subapertures')
    seglist = util.get_segment_list(instrument)
    wvln = CONFIG_PASTIS.getfloat(instrument, 'lambda') * 1e-9   # m
    wfe_aber = CONFIG_PASTIS.getfloat(instrument, 'calibration_aberration') * 1e-9   # m

    # Record some of the defined parameters
    log.info(f'Instrument: {tel_suffix}')
    log.info(f'Wavelength: {wvln} m')
    log.info(f'Number of segments: {nb_seg}')
    log.info(f'Segment list: {seglist}')
    log.info(f'wfe_aber: {wfe_aber} m')
    log.info(f'Total number of segment pairs in {instrument} pupil: {len(list(util.segment_pairs_all(nb_seg)))}')
    log.info(f'Non-repeating pairs in {instrument} pupil calculated here: {len(list(util.segment_pairs_non_repeating(nb_seg)))}')

    # Copy configfile to resulting matrix directory
    util.copy_config(resDir)

    # Calculate coronagraph floor, and normalization factor from direct image
    contrast_floor, norm = calculate_unaberrated_contrast_and_normalization(instrument, design, return_coro_simulator=False,
                                                                            save_coro_floor=True, save_psfs=False, outpath=overall_dir)

    # Figure out how many processes is optimal and create a Pool.
    # Assume we're the only ones on the machine so we can hog all the resources.
    # We expect numpy to use multithreaded math via the Intel MKL library, so
    # we check how many threads MKL will use, and create enough processes so
    # as to use 100% of the CPU cores.
    # You might think we should divide the number of cores by 2 to get physical cores
    # to account for hyperthreading, however empirical testing on telserv3 shows that
    # it is slightly more performant to use all logical cores.
    num_cpu = multiprocessing.cpu_count()
    # try:
    #     import mkl
    #     num_core_per_process = mkl.get_max_threads()
    # except ImportError:
    #     # typically this is 4, so use that as default
    #     log.info("Couldn't import MKL; guessing default value of 4 cores per process")
    #     num_core_per_process = 4

    num_core_per_process = 1   # NOTE: this was changed by Scott Will in HiCAT and makes more sense, somehow
    num_processes = int(num_cpu // num_core_per_process)
    log.info(f"Multiprocess PASTIS matrix for {instrument} will use {num_processes} processes (with {num_core_per_process} threads per process)")

    # Set up a function with all arguments fixed except for the last one, which is the segment pair tuple
    if instrument == 'LUVOIR':
        calculate_matrix_pair = functools.partial(_luvoir_matrix_one_pair, design, norm, wfe_aber, zern_mode, resDir, savepsfs, saveopds)

    if instrument == 'HiCAT':
        # Copy used BostonDM maps to matrix folder
        shutil.copytree(CONFIG_PASTIS.get('HiCAT', 'dm_maps_path'), os.path.join(resDir, 'hicat_boston_dm_commands'))
        calculate_matrix_pair = functools.partial(_hicat_matrix_one_pair, norm, wfe_aber, resDir, savepsfs, saveopds)

    if instrument == 'JWST':
        calculate_matrix_pair = functools.partial(_jwst_matrix_one_pair, norm, wfe_aber, resDir, savepsfs, saveopds)

    # Iterate over all segment pairs via a multiprocess pool
    mypool = multiprocessing.Pool(num_processes)
    t_start = time.time()
    results = mypool.map(calculate_matrix_pair, util.segment_pairs_non_repeating(nb_seg))   # this util function returns a generator
    t_stop = time.time()

    log.info(f"Multiprocess calculation complete in {t_stop-t_start}sec = {(t_stop-t_start)/60}min")

    # Unscramble results
    # results is a list of tuples that contain the return from the partial function, in this case: results[i] = (c, (seg1, seg2))
    contrast_matrix = np.zeros([nb_seg, nb_seg])   # Generate empty matrix
    for i in range(len(results)):
        # Fill according entry in the matrix and subtract baseline contrast
        contrast_matrix[results[i][1][0], results[i][1][1]] = results[i][0] - contrast_floor
    mypool.close()

    # Save all contrasts to disk, WITH subtraction of coronagraph floor
    hcipy.write_fits(contrast_matrix, os.path.join(resDir, 'pair-wise_contrasts.fits'))
    plt.figure(figsize=(10, 10))
    plt.imshow(contrast_matrix)
    plt.colorbar()
    plt.savefig(os.path.join(resDir, 'contrast_matrix.pdf'))

    # Calculate the PASTIS matrix from the contrast matrix: off-axis elements and normalization
    matrix_pastis = pastis_from_contrast_matrix(contrast_matrix, seglist, wfe_aber)

    # Save matrix to file
    filename_matrix = f'PASTISmatrix_num_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}'
    hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits'))
    ppl.plot_pastis_matrix(matrix_pastis, wvln*1e9, out_dir=resDir, save=True)   # convert wavelength to nm
    log.info(f'Matrix saved to: {os.path.join(resDir, filename_matrix + ".fits")}')

    # Tell us how long it took to finish.
    end_time = time.time()
    log.info(f'Runtime for matrix_building_numerical.py/multiprocess: {end_time - start_time}sec = {(end_time - start_time)/60}min')
    log.info(f'Data saved to {resDir}')

    return overall_dir
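
# --- Hypothetical usage sketch, not part of the original module: building a matrix and then feeding the resulting
# experiment directory into the analysis. Since run_full_pastis_analysis() joins run_choice with local_data_path, and
# create_data_path() places overall_dir under local_data_path, passing the basename of the returned directory should
# work; this chaining, the target contrast value and the helper name are assumptions for illustration only.
def _example_matrix_then_analysis():
    overall_dir = num_matrix_multiprocess(instrument='HiCAT', savepsfs=False, saveopds=True)
    run_full_pastis_analysis(instrument='HiCAT',
                             run_choice=os.path.basename(overall_dir),
                             c_target=1e-7,   # hypothetical target contrast for a testbed
                             n_repeat=100)
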
def hockeystick_jwst(range_points=3, no_realizations=3, matrix_mode='analytical'):
    """
    Construct a PASTIS hockeystick contrast curve for validation of the PASTIS matrix for JWST.

    The aberration range is a fixed parameter in the function body since it depends on the coronagraph (and telescope)
    used. We define how many realizations of a specific rms error we want to run through, and also how many points we
    want to fill the aberration range with. At each point we calculate the contrast for all realizations and plot the
    mean of this set of results in a figure that shows contrast vs. rms phase error.

    :param range_points: int, how many points of rms error (OPD) to use in the predefined aberration range
    :param no_realizations: int, how many realizations per rms error (OPD) should be calculated; the mean of the
                            realizations is used
    :param matrix_mode: string, choice of PASTIS matrix to validate: 'analytical' or 'numerical'
    :return:
    """

    # Keep track of time
    start_time = time.time()

    ##########################
    WORKDIRECTORY = "active"   # you can choose here what data directory to work in;
                               # anything else than "active" works only with im_pastis=False
    rms_range = np.logspace(-1, 3, range_points)   # Create range of RMS values to test
    realiz = no_realizations   # how many random realizations per RMS value to do
    ##########################

    # Set up path for results
    outDir = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), WORKDIRECTORY, 'results')
    os.makedirs(outDir, exist_ok=True)
    os.makedirs(os.path.join(outDir, 'dh_images_' + matrix_mode), exist_ok=True)

    # Loop over different RMS values and calculate contrast with PASTIS and E2E simulation
    e2e_contrasts = []      # contrasts from E2E sim
    am_contrasts = []       # contrasts from image PASTIS
    matrix_contrasts = []   # contrasts from matrix PASTIS

    log.info(f"RMS range: {rms_range}")
    log.info(f"Random realizations: {realiz}")

    for i, rms in enumerate(rms_range):

        rms *= u.nm   # Making sure this has the correct units

        e2e_rand = []
        am_rand = []
        matrix_rand = []

        for j in range(realiz):
            log.info("\n#####################################")
            log.info("CALCULATING CONTRAST FOR {:.4f}".format(rms))
            log.info(f"RMS {i + 1}/{len(rms_range)}")
            log.info(f"Random realization: {j+1}/{realiz}")
            log.info(f"Total: {(i*realiz)+(j+1)}/{len(rms_range)*realiz}\n")

            c_e2e, c_am, c_matrix = consim.contrast_jwst_ana_num(matdir=WORKDIRECTORY, matrix_mode=matrix_mode,
                                                                 rms=rms, im_pastis=True, plotting=True)

            e2e_rand.append(c_e2e)
            am_rand.append(c_am)
            matrix_rand.append(c_matrix)

        e2e_contrasts.append(np.mean(e2e_rand))
        am_contrasts.append(np.mean(am_rand))
        matrix_contrasts.append(np.mean(matrix_rand))

    e2e_contrasts = np.array(e2e_contrasts)
    am_contrasts = np.array(am_contrasts)
    matrix_contrasts = np.array(matrix_contrasts)

    # Save results to txt file
    df = pd.DataFrame({'rms': rms_range, 'c_e2e': e2e_contrasts, 'c_am': am_contrasts, 'c_matrix': matrix_contrasts})
    df.to_csv(os.path.join(outDir, "hockey_contrasts_" + matrix_mode + ".txt"), sep=' ', na_rep='NaN')

    # Plot
    plt.clf()
    plt.title("Contrast calculation")
    plt.plot(rms_range, e2e_contrasts, label="E2E")
    plt.plot(rms_range, am_contrasts, label="Image PASTIS")
    plt.plot(rms_range, matrix_contrasts, label="Matrix PASTIS")
    plt.semilogx()
    plt.semilogy()
    plt.xlabel("WFE RMS (OPD) in " + str(u.nm))
    plt.ylabel("Contrast")
    plt.legend()
    #plt.show()
    plt.savefig(os.path.join(outDir, "PASTIS_HOCKEY_STICK_" + matrix_mode + ".pdf"))

    end_time = time.time()
    runtime = end_time - start_time
    log.info(f'Runtime for pastis_vs_e2e_contrast_calc.py: {runtime} sec = {runtime/60} min')
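
# --- Hypothetical usage sketch, not part of the original module: validating a numerical JWST PASTIS matrix with a
# denser hockeystick curve. The point counts are arbitrary choices, and the helper name is invented; results are
# written to <local_data_path>/active/results/ as set up in the function above.
def _example_hockeystick_validation():
    hockeystick_jwst(range_points=10, no_realizations=20, matrix_mode='numerical')
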
    plt.imshow(dh_psf, origin='lower')
    plt.title('JWST dark hole')
    plt.show()
    """

    # dh_psf is the image of the dark hole only, the pixels outside of it are zero
    # intensity is the entire final image
    return dh_psf, intensity


if __name__ == '__main__':

    log.info('Testing the uncalibrated analytical model\n')

    ### Define the aberration coefficients "coef"
    telescope = CONFIG_PASTIS.get('telescope', 'name')
    nb_seg = CONFIG_PASTIS.getint(telescope, 'nb_subapertures')
    zern_max = CONFIG_PASTIS.getint('zernikes', 'max_zern')
    nm_aber = CONFIG_PASTIS.getfloat(telescope, 'calibration_aberration') * u.nm   # [nm] amplitude of aberration
    zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike')   # Which (Noll) Zernike we are calibrating for
    wss_zern_nb = util.noll_to_wss(zern_number)   # Convert from Noll to WSS framework

    ### What segment are we aberrating? ###
    i = 0   # segment 1 --> i=0, seg 2 --> i=1, etc.
    cali = False   # calibrated or not?