""" Launcher script to start a full HiCAT run: generate matrix and run full PASTIS analysis. """ import os from pastis.config import CONFIG_PASTIS from pastis.hockeystick_contrast_curve import hockeystick_curve from pastis.matrix_building_numerical import num_matrix_multiprocess from pastis.pastis_analysis import run_full_pastis_analysis import pastis.util as util if __name__ == '__main__': # Generate the matrix dir_run = num_matrix_multiprocess(instrument='HiCAT') # Alternatively, pick data location to run PASTIS analysis on #dir_run = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), '2020-08-26T11-44-28_hicat') # Set up loggers for data analysis util.setup_pastis_logging(dir_run, 'pastis_analysis') # Then generate hockeystick curve result_dir = os.path.join(dir_run, 'results') matrix_dir = os.path.join(dir_run, 'matrix_numerical') hockeystick_curve(instrument='HiCAT', matrixdir=matrix_dir, resultdir=result_dir, range_points=10, no_realizations=3) # Finally run the analysis run_full_pastis_analysis(instrument='HiCAT', run_choice=dir_run, c_target=1e-7)
def num_matrix_multiprocess(instrument, design=None, savepsfs=True, saveopds=True):
    """ Generate a numerical/semi-analytical PASTIS matrix.

    Multiprocessed script to calculate PASTIS matrix. Implementation adapted from
    hicat.scripts.stroke_minimization.calculate_jacobian

    :param instrument: str, what instrument (LUVOIR, HiCAT, JWST) to generate the PASTIS matrix for
    :param design: str, optional, default=None, which means we read from the configfile: what coronagraph
                   design to use - 'small', 'medium' or 'large' (only relevant for LUVOIR)
    :param savepsfs: bool, if True, all PSFs will be saved to disk individually, as fits files.
    :param saveopds: bool, if True, all pupil surface maps of aberrated segment pairs will be saved to disk as PDF
    :raises ValueError: if 'instrument' is not one of 'LUVOIR', 'HiCAT' or 'JWST'
    :return: overall_dir: string, experiment directory
    """

    # Keep track of time
    start_time = time.time()   # runtime is currently around 150 minutes

    ### Parameters

    # Create directory names
    tel_suffix = f'{instrument.lower()}'
    if instrument == 'LUVOIR':
        if design is None:
            design = CONFIG_PASTIS.get('LUVOIR', 'coronagraph_design')
        tel_suffix += f'-{design}'
    overall_dir = util.create_data_path(CONFIG_PASTIS.get('local', 'local_data_path'), telescope=tel_suffix)
    os.makedirs(overall_dir, exist_ok=True)
    resDir = os.path.join(overall_dir, 'matrix_numerical')

    # Create necessary directories if they don't exist yet
    os.makedirs(resDir, exist_ok=True)
    os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True)
    os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True)

    # Set up logger
    util.setup_pastis_logging(resDir, f'pastis_matrix_{tel_suffix}')
    log.info(f'Building numerical matrix for {tel_suffix}\n')

    # Read calibration aberration
    zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike')
    zern_mode = util.ZernikeMode(zern_number)   # Create Zernike mode object for easier handling

    # General telescope parameters
    nb_seg = CONFIG_PASTIS.getint(instrument, 'nb_subapertures')
    seglist = util.get_segment_list(instrument)
    wvln = CONFIG_PASTIS.getfloat(instrument, 'lambda') * 1e-9   # m
    wfe_aber = CONFIG_PASTIS.getfloat(instrument, 'calibration_aberration') * 1e-9   # m

    # Record some of the defined parameters
    log.info(f'Instrument: {tel_suffix}')
    log.info(f'Wavelength: {wvln} m')
    log.info(f'Number of segments: {nb_seg}')
    log.info(f'Segment list: {seglist}')
    log.info(f'wfe_aber: {wfe_aber} m')
    log.info(f'Total number of segment pairs in {instrument} pupil: {len(list(util.segment_pairs_all(nb_seg)))}')
    log.info(f'Non-repeating pairs in {instrument} pupil calculated here: {len(list(util.segment_pairs_non_repeating(nb_seg)))}')

    # Copy configfile to resulting matrix directory
    util.copy_config(resDir)

    # Calculate coronagraph floor, and normalization factor from direct image
    contrast_floor, norm = calculate_unaberrated_contrast_and_normalization(instrument, design, return_coro_simulator=False,
                                                                            save_coro_floor=True, save_psfs=False, outpath=overall_dir)

    # Figure out how many processes is optimal and create a Pool.
    # Assume we're the only one on the machine so we can hog all the resources.
    # We expect numpy to use multithreaded math via the Intel MKL library, so
    # we check how many threads MKL will use, and create enough processes so
    # as to use 100% of the CPU cores.
    # You might think we should divide number of cores by 2 to get physical cores
    # to account for hyperthreading, however empirical testing on telserv3 shows that
    # it is slightly more performant on telserv3 to use all logical cores
    num_cpu = multiprocessing.cpu_count()
    # A single thread per process was found to perform better than mkl.get_max_threads()
    # (typically 4), as changed by Scott Will in HiCAT.
    num_core_per_process = 1
    num_processes = int(num_cpu // num_core_per_process)
    log.info(f"Multiprocess PASTIS matrix for {instrument} will use {num_processes} processes (with {num_core_per_process} threads per process)")

    # Set up a function with all arguments fixed except for the last one, which is the segment pair tuple
    if instrument == 'LUVOIR':
        calculate_matrix_pair = functools.partial(_luvoir_matrix_one_pair, design, norm, wfe_aber, zern_mode, resDir, savepsfs, saveopds)
    elif instrument == 'HiCAT':
        # Copy used BostonDM maps to matrix folder
        shutil.copytree(CONFIG_PASTIS.get('HiCAT', 'dm_maps_path'), os.path.join(resDir, 'hicat_boston_dm_commands'))
        calculate_matrix_pair = functools.partial(_hicat_matrix_one_pair, norm, wfe_aber, resDir, savepsfs, saveopds)
    elif instrument == 'JWST':
        calculate_matrix_pair = functools.partial(_jwst_matrix_one_pair, norm, wfe_aber, resDir, savepsfs, saveopds)
    else:
        # Fail early and clearly; previously an unknown instrument fell through to a
        # confusing NameError on 'calculate_matrix_pair' at the pool.map() call below.
        raise ValueError(f"Unknown instrument '{instrument}', must be one of 'LUVOIR', 'HiCAT', 'JWST'.")

    # Iterate over all segment pairs via a multiprocess pool.
    # The context manager guarantees the worker processes are cleaned up even if a worker raises.
    t_start = time.time()
    with multiprocessing.Pool(num_processes) as mypool:
        results = mypool.map(calculate_matrix_pair, util.segment_pairs_non_repeating(nb_seg))   # this util function returns a generator
    t_stop = time.time()
    log.info(f"Multiprocess calculation complete in {t_stop-t_start}sec = {(t_stop-t_start)/60}min")

    # Unscramble results
    # results is a list of tuples that contain the return from the partial function,
    # in this case: results[i] = (contrast, (seg1, seg2))
    contrast_matrix = np.zeros([nb_seg, nb_seg])   # Generate empty matrix
    for contrast, (seg1, seg2) in results:
        # Fill according entry in the matrix and subtract baseline contrast
        contrast_matrix[seg1, seg2] = contrast - contrast_floor

    # Save all contrasts to disk, WITH subtraction of coronagraph floor
    hcipy.write_fits(contrast_matrix, os.path.join(resDir, 'pair-wise_contrasts.fits'))
    plt.figure(figsize=(10, 10))
    plt.imshow(contrast_matrix)
    plt.colorbar()
    plt.savefig(os.path.join(resDir, 'contrast_matrix.pdf'))

    # Calculate the PASTIS matrix from the contrast matrix: off-axis elements and normalization
    matrix_pastis = pastis_from_contrast_matrix(contrast_matrix, seglist, wfe_aber)

    # Save matrix to file
    filename_matrix = f'PASTISmatrix_num_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}'
    hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits'))
    ppl.plot_pastis_matrix(matrix_pastis, wvln*1e9, out_dir=resDir, save=True)   # convert wavelength to nm
    log.info(f'Matrix saved to: {os.path.join(resDir, filename_matrix + ".fits")}')

    # Tell us how long it took to finish.
    end_time = time.time()
    log.info(f'Runtime for matrix_building_numerical.py/multiprocess: {end_time - start_time}sec = {(end_time - start_time)/60}min')
    log.info(f'Data saved to {resDir}')

    return overall_dir
import pastis.util as util

if __name__ == '__main__':

    # First generate a couple of matrices (medium and large designs are disabled here;
    # uncomment the corresponding lines throughout to run them as well)
    dir_small = num_matrix_multiprocess(instrument='LUVOIR', design='small')
    #dir_medium = num_matrix_multiprocess(instrument='LUVOIR', design='medium')
    #dir_large = num_matrix_multiprocess(instrument='LUVOIR', design='large')

    # Alternatively, pick data locations to run PASTIS analysis on
    #dir_small = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'your-data-directory_small')
    #dir_medium = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'your-data-directory_medium')
    #dir_large = os.path.join(CONFIG_PASTIS.get('local', 'local_data_path'), 'your-data-directory_large')

    # Set up loggers for data analysis in all cases
    util.setup_pastis_logging(dir_small, 'pastis_analysis')
    #util.setup_pastis_logging(dir_medium, 'pastis_analysis')
    #util.setup_pastis_logging(dir_large, 'pastis_analysis')

    # Then generate all hockeystick curves, reading from each matrix directory
    # and writing into the matching results directory
    result_dir_small = os.path.join(dir_small, 'results')
    matrix_dir_small = os.path.join(dir_small, 'matrix_numerical')
    hockeystick_curve(instrument='LUVOIR', apodizer_choice='small', matrixdir=matrix_dir_small, resultdir=result_dir_small, range_points=10, no_realizations=3)

    #result_dir_medium = os.path.join(dir_medium, 'results')
    #matrix_dir_medium = os.path.join(dir_medium, 'matrix_numerical')
def num_matrix_luvoir(design, savepsfs=False, saveopds=True):
    """ Generate a numerical PASTIS matrix for a LUVOIR A coronagraph.

    -- Deprecated function, the LUVOIR PASTIS matrix is better calculated with num_matrix_multiprocess(), which can
    do this for your choice of one of the implemented instruments (LUVOIR, HiCAT, JWST). --

    All inputs are read from the (local) configfile and saved to the specified output directory.
    The LUVOIR STDT delivery in May 2018 included three different apodizers
    we can work with, you pick which of the three you want with the 'design' parameter.

    :param design: string, what coronagraph design to use - 'small', 'medium' or 'large'
    :param savepsfs: bool, if True, all PSFs will be saved to disk individually, as fits files, additionally to the
                     total PSF cube. If False, the total cube will still get saved at the very end of the script.
    :param saveopds: bool, if True, all pupil surface maps of aberrated segment pairs will be saved to disk as PDF
    :return overall_dir: string, experiment directory
    """

    # Keep track of time
    start_time = time.time()

    ### Parameters

    # System parameters
    overall_dir = util.create_data_path(CONFIG_PASTIS.get('local', 'local_data_path'), telescope='luvoir-'+design)
    os.makedirs(overall_dir, exist_ok=True)
    resDir = os.path.join(overall_dir, 'matrix_numerical')

    # Create necessary directories if they don't exist yet
    os.makedirs(resDir, exist_ok=True)
    os.makedirs(os.path.join(resDir, 'OTE_images'), exist_ok=True)
    os.makedirs(os.path.join(resDir, 'psfs'), exist_ok=True)

    # Set up logger
    util.setup_pastis_logging(resDir, f'pastis_matrix_{design}')
    log.info('Building numerical matrix for LUVOIR\n')

    # Read calibration aberration
    zern_number = CONFIG_PASTIS.getint('calibration', 'local_zernike')
    zern_mode = util.ZernikeMode(zern_number)   # Create Zernike mode object for easier handling

    # General telescope parameters
    nb_seg = CONFIG_PASTIS.getint('LUVOIR', 'nb_subapertures')
    wvln = CONFIG_PASTIS.getfloat('LUVOIR', 'lambda') * 1e-9   # m
    diam = CONFIG_PASTIS.getfloat('LUVOIR', 'diameter')   # m
    wfe_aber = CONFIG_PASTIS.getfloat('LUVOIR', 'calibration_aberration') * 1e-9   # m

    # Image system parameters
    sampling = CONFIG_PASTIS.getfloat('LUVOIR', 'sampling')

    # Record some of the defined parameters
    log.info(f'LUVOIR apodizer design: {design}')
    log.info(f'Wavelength: {wvln} m')
    log.info(f'Telescope diameter: {diam} m')
    log.info(f'Number of segments: {nb_seg}')
    log.info(f'Sampling: {sampling} px per lambda/D')
    log.info(f'wfe_aber: {wfe_aber} m')

    # Copy configfile to resulting matrix directory
    util.copy_config(resDir)

    ### Instantiate Luvoir telescope with chosen apodizer design
    optics_input = CONFIG_PASTIS.get('LUVOIR', 'optics_path')
    luvoir = LuvoirAPLC(optics_input, design, sampling)

    ### Reference images for contrast normalization and coronagraph floor
    unaberrated_coro_psf, ref = luvoir.calc_psf(ref=True, display_intermediate=False, return_intermediate=False)
    # Normalization factor: peak of the direct (non-coronagraphic) reference image
    norm = np.max(ref)

    # Coronagraph floor: mean normalized intensity inside the dark-hole mask, with no aberration applied
    dh_intensity = (unaberrated_coro_psf / norm) * luvoir.dh_mask
    contrast_floor = np.mean(dh_intensity[np.where(luvoir.dh_mask != 0)])
    log.info(f'contrast floor: {contrast_floor}')

    ### Generating the PASTIS matrix and a list for all contrasts
    contrast_matrix = np.zeros([nb_seg, nb_seg])   # Generate empty matrix
    all_psfs = []
    all_contrasts = []

    # NOTE: this loops over ALL (i, j) combinations, so each off-diagonal pair is simulated
    # twice ((i,j) and (j,i)); num_matrix_multiprocess() avoids this by using non-repeating pairs.
    for i in range(nb_seg):
        for j in range(nb_seg):
            log.info(f'\nSTEP: {i+1}-{j+1} / {nb_seg}-{nb_seg}')

            # Put aberration on correct segments. If i=j, apply only once!
            # Each segment gets wfe_aber/2 so that the pair sums to one calibration aberration.
            luvoir.flatten()
            luvoir.set_segment(i+1, wfe_aber/2, 0, 0)
            if i != j:
                luvoir.set_segment(j+1, wfe_aber/2, 0, 0)

            log.info('Calculating coro image...')
            image, inter = luvoir.calc_psf(ref=False, display_intermediate=False, return_intermediate='intensity')
            # Normalize PSF by reference image
            psf = image / norm
            all_psfs.append(psf.shaped)

            # Save image to disk
            if savepsfs:   # TODO: I might want to change this to matplotlib images since I save the PSF cube anyway.
                filename_psf = f'psf_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}'
                hcipy.write_fits(psf, os.path.join(resDir, 'psfs', filename_psf + '.fits'))

            # Save OPD images for testing
            if saveopds:
                opd_name = f'opd_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}_segs_{i+1}-{j+1}'
                plt.clf()
                hcipy.imshow_field(inter['seg_mirror'], mask=luvoir.aperture, cmap='RdBu')
                plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf'))

            log.info('Calculating mean contrast in dark hole')
            dh_intensity = psf * luvoir.dh_mask
            contrast = np.mean(dh_intensity[np.where(luvoir.dh_mask != 0)])
            log.info(f'contrast: {float(contrast)}')   # contrast is a Field, here casting to normal float
            all_contrasts.append(contrast)

            # Fill according entry in the matrix and subtract baseline contrast
            contrast_matrix[i,j] = contrast - contrast_floor

    # Transform saved lists to arrays
    all_psfs = np.array(all_psfs)
    all_contrasts = np.array(all_contrasts)

    # Save the PSF image *cube* as well (as opposed to each one individually)
    hcipy.write_fits(all_psfs, os.path.join(resDir, 'psfs', 'psf_cube.fits'),)
    np.savetxt(os.path.join(resDir, 'pair-wise_contrasts.txt'), all_contrasts, fmt='%e')

    # Filling the off-axis elements
    log.info('\nCalculating off-axis matrix elements...')
    matrix_two_N = np.copy(contrast_matrix)   # This is just an intermediary copy so that I don't mix things up.
    matrix_pastis = np.copy(contrast_matrix)   # This will be the final PASTIS matrix.

    for i in range(nb_seg):
        for j in range(nb_seg):
            if i != j:
                matrix_off_val = (matrix_two_N[i,j] - matrix_two_N[i,i] - matrix_two_N[j,j]) / 2.
                matrix_pastis[i,j] = matrix_off_val
                log.info(f'Off-axis for i{i+1}-j{j+1}: {matrix_off_val}')

    # Normalize matrix for the input aberration - this defines what units the PASTIS matrix will be in. The PASTIS
    # matrix propagation function (util.pastis_contrast()) then needs to take in the aberration vector in these same
    # units. I have chosen to keep this to 1nm, so, we normalize the PASTIS matrix to units of nanometers.
    matrix_pastis /= np.square(wfe_aber * 1e9)   # 1e9 converts the calibration aberration back to nanometers

    # Save matrix to file
    filename_matrix = f'PASTISmatrix_num_{zern_mode.name}_{zern_mode.convention + str(zern_mode.index)}'
    hcipy.write_fits(matrix_pastis, os.path.join(resDir, filename_matrix + '.fits'))
    log.info(f'Matrix saved to: {os.path.join(resDir, filename_matrix + ".fits")}')

    # Tell us how long it took to finish.
    end_time = time.time()
    log.info(f'Runtime for matrix_building.py: {end_time - start_time}sec = {(end_time - start_time) / 60}min')
    log.info(f'Data saved to {resDir}')

    return overall_dir