def contrast_luvoir_num(apodizer_choice,
                        matrix_dir,
                        matrix_mode='luvoir',
                        rms=1 * u.nm):
    """
    Compute the contrast for a random SM mislignment on the LUVOIR simulator.
    :param matrix_dir: str, directory of saved matrix
    :param matrix_mode: str, analytical or numerical; currently only numerical supported
    :param rms: astropy quantity, rms wfe to be put randomly on the SM
    :return: 2x float, E2E and matrix contrast
    """

    # Keep track of time
    start_time = time.time()  # runtime currently is around ? min

    # Parameters
    nb_seg = CONFIG_INI.getint('LUVOIR', 'nb_subapertures')
    sampling = 4

    # Import numerical PASTIS matrix for the LUVOIR sim
    filename = 'PASTISmatrix_num_piston_Noll1'
    matrix_pastis = fits.getdata(os.path.join(matrix_dir, filename + '.fits'))

    # Create random aberration coefficients
    aber = np.random.random([nb_seg])  # piston values in input units
    print('PISTON ABERRATIONS:', aber)

    # Normalize to the RMS value I want
    rms_init = util.rms(aber)
    aber *= rms.value / rms_init
    calc_rms = util.rms(aber) * u.nm
    aber *= u.nm  # making sure the aberration has the correct units
    print("Calculated RMS:", calc_rms)

    # Remove global piston
    aber -= np.mean(aber)

    # Coronagraph parameters
    # The LUVOIR STDT delivery in May 2018 included three different apodizers
    # we can work with, so I will implement an easy way of making a choice between them.
    design = apodizer_choice
    optics_input = '/Users/ilaginja/Documents/LabWork/ultra/LUVOIR_delivery_May2019/'  # local path to the LUVOIR STDT delivery files; adjust for your setup

    # Instantiate LUVOIR telescope with APLC
    luvoir = LuvoirAPLC(optics_input, design, sampling)

    ### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
    # The unaberrated coronagraphic PSF is generated in the same call.
    start_e2e = time.time()
    print('Generating baseline PSF from E2E - no coronagraph, no aberrations')
    print('Also generating coro PSF without aberrations')
    psf_perfect, ref = luvoir.calc_psf(ref=True)
    normp = np.max(ref)
    psf_coro = psf_perfect / normp

    print('Calculating E2E contrast...')
    # Put aberrations on segmented mirror
    for nseg in range(nb_seg):
        luvoir.set_segment(nseg + 1, aber[nseg].to(u.m).value / 2, 0, 0)

    psf_luvoir = luvoir.calc_psf()
    psf_luvoir /= normp

    # Create the dark-hole mask: an annulus between IWA and OWA (in lambda/D) on the focal-plane grid
    dh_outer = hc.circular_aperture(2 * luvoir.apod_dict[design]['owa'] *
                                    luvoir.lam_over_d)(luvoir.focal_det)
    dh_inner = hc.circular_aperture(2 * luvoir.apod_dict[design]['iwa'] *
                                    luvoir.lam_over_d)(luvoir.focal_det)
    dh_mask = (dh_outer - dh_inner).astype('bool')

    # Get the mean contrast
    dh_intensity = psf_luvoir * dh_mask
    contrast_luvoir = np.mean(dh_intensity[np.where(dh_intensity != 0)])
    end_e2e = time.time()

    ###
    # Calculate baseline contrast
    baseline_dh = psf_coro * dh_mask
    contrast_base = np.mean(baseline_dh[np.where(baseline_dh != 0)])
    print('Baseline contrast: {}'.format(contrast_base))

    ## MATRIX PASTIS
    print('Generating contrast from matrix-PASTIS')
    start_matrixpastis = time.time()
    # Get mean contrast from matrix PASTIS
    contrast_matrix = util.pastis_contrast(
        aber, matrix_pastis
    ) + contrast_base  # calculating contrast with PASTIS matrix model
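
    # For reference: util.pastis_contrast() is assumed to evaluate the PASTIS quadratic form
    # c = a^T M a for the aberration vector a and PASTIS matrix M. A minimal numpy sketch of that
    # assumption (not the actual util_pastis implementation):
    #     def pastis_contrast_sketch(aber, matrix_pastis):
    #         a = aber.value          # strip astropy units
    #         return np.dot(a, np.dot(matrix_pastis, a))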
    end_matrixpastis = time.time()

    ## Outputs
    print('\n--- CONTRASTS: ---')
    print('Mean contrast from E2E:', contrast_luvoir)
    print('Contrast from matrix PASTIS:', contrast_matrix)

    print('\n--- RUNTIMES: ---')
    print('E2E: ', end_e2e - start_e2e, 'sec =', (end_e2e - start_e2e) / 60,
          'min')
    print('Matrix PASTIS: ', end_matrixpastis - start_matrixpastis, 'sec =',
          (end_matrixpastis - start_matrixpastis) / 60, 'min')

    end_time = time.time()
    runtime = end_time - start_time
    print('Runtime for contrast_calculation_simple.py: {} sec = {} min'.format(
        runtime, runtime / 60))

    return contrast_luvoir, contrast_matrix
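

# Hedged usage sketch for contrast_luvoir_num(); the matrix directory below is a placeholder
# and the apodizer design is one of 'small', 'medium' or 'large':
#     c_e2e, c_matrix = contrast_luvoir_num('small',
#                                           matrix_dir='/path/to/active/matrix_numerical',
#                                           rms=1 * u.nm)
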
def ana_matrix_jwst():
    """
    Generate the analytical PASTIS matrix for JWST.

    All inputs are read from the (local) configfile and results are saved to the 'matrix_analytical' subfolder.
    """

    # Keep track of time
    start_time = time.time()  # runtime is currently around 11 minutes
    print('Building analytical matrix for JWST\n')

    # Parameters
    datadir = os.path.join(CONFIG_INI.get('local', 'local_data_path'),
                           'active')
    which_tel = CONFIG_INI.get('telescope', 'name')
    resDir = os.path.join(datadir, 'matrix_analytical')
    nb_seg = CONFIG_INI.getint(which_tel, 'nb_subapertures')
    nm_aber = CONFIG_INI.getfloat('calibration', 'single_aberration') * u.nm
    zern_number = CONFIG_INI.getint('calibration',
                                    'zernike')  # Noll convention!
    zern_mode = util.ZernikeMode(
        zern_number)  # Create Zernike mode object for easier handling

    # If subfolder "matrix_analytical" doesn't exist yet, create it.
    if not os.path.isdir(resDir):
        os.mkdir(resDir)

    #-# Generating the PASTIS matrix
    matrix_direct = np.zeros(
        [nb_seg,
         nb_seg])  # Generate empty matrix for contrast values from loop.
    all_ims = []
    all_dhs = []
    all_contrasts = []

    for i in range(nb_seg):
        for j in range(nb_seg):

            print('STEP: {}-{} / {}-{}'.format(i + 1, j + 1, nb_seg, nb_seg))

            # Putting aberration only on segments i and j
            tempA = np.zeros([nb_seg])
            tempA[i] = nm_aber.value
            tempA[j] = nm_aber.value
            tempA *= u.nm  # making sure this array has the right units

            # Create PASTIS image and save full image as well as DH image
            temp_im_am, full_psf = impastis.analytical_model(zern_number,
                                                             tempA,
                                                             cali=True)

            filename_psf = 'psf_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            util.write_fits(full_psf,
                            os.path.join(resDir, 'psfs',
                                         filename_psf + '.fits'),
                            header=None,
                            metadata=None)
            all_ims.append(full_psf)

            filename_dh = 'dh_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            util.write_fits(temp_im_am,
                            os.path.join(resDir, 'darkholes',
                                         filename_dh + '.fits'),
                            header=None,
                            metadata=None)
            all_dhs.append(temp_im_am)

            contrast = np.mean(temp_im_am[np.where(temp_im_am != 0)])
            matrix_direct[i, j] = contrast
            print('contrast =', contrast)
            all_contrasts.append(contrast)

    all_ims = np.array(all_ims)
    all_dhs = np.array(all_dhs)
    all_contrasts = np.array(all_contrasts)

    # Filling the off-axis elements
    matrix_two_N = np.copy(
        matrix_direct
    )  # This is just an intermediary copy so that I don't mix things up.
    matrix_pastis = np.copy(
        matrix_direct)  # This will be the final PASTIS matrix.
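
    # PASTIS models the dark-hole contrast as a quadratic form c = a^T M a. With an equal aberration
    # amplitude a on segments i and j, the measured contrast is
    #     c_meas(i, j) = a^2 * (M_ii + M_jj + 2 * M_ij),
    # while the diagonal measurements give c_meas(i, i) = a^2 * M_ii. The off-diagonal entries
    # therefore follow from
    #     M_ij = (c_meas(i, j) - c_meas(i, i) - c_meas(j, j)) / 2,
    # with the overall 1/a^2 normalization applied further down.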

    for i in range(nb_seg):
        for j in range(nb_seg):
            if i != j:
                matrix_off_val = (matrix_two_N[i, j] - matrix_two_N[i, i] -
                                  matrix_two_N[j, j]) / 2.
                matrix_pastis[i, j] = matrix_off_val
                print('Off-axis for i{}-j{}: {}'.format(
                    i + 1, j + 1, matrix_off_val))

    # Normalize matrix for the input aberration
    matrix_pastis /= np.square(nm_aber.value)

    # Save matrix to file
    filename = 'PASTISmatrix_' + zern_mode.name + '_' + zern_mode.convention + str(
        zern_mode.index)
    util.write_fits(matrix_pastis,
                    os.path.join(resDir, filename + '.fits'),
                    header=None,
                    metadata=None)
    print('Matrix saved to:', os.path.join(resDir, filename + '.fits'))

    # Save the PSF and DH image *cubes* as well (as opposed to each one individually)
    util.write_fits(all_ims,
                    os.path.join(resDir, 'psfs', 'psf_cube' + '.fits'),
                    header=None,
                    metadata=None)
    util.write_fits(all_dhs,
                    os.path.join(resDir, 'darkholes', 'dh_cube' + '.fits'),
                    header=None,
                    metadata=None)
    np.savetxt(os.path.join(resDir, 'contrasts.txt'), all_contrasts, fmt='%e')

    # Tell us how long it took to finish.
    end_time = time.time()
    print('Runtime for matrix_building.py:', end_time - start_time, 'sec =',
          (end_time - start_time) / 60, 'min')
    print('Data saved to {}'.format(resDir))
def contrast_hicat_num(matrix_dir, matrix_mode='hicat', rms=1 * u.nm):
    """
    Compute the contrast for a random IrisAO misalignment on the HiCAT simulator.
    :param matrix_dir: str, directory of saved matrix
    :param matrix_mode: str, analytical or numerical; currently only numerical supported
    :param rms: astropy quantity, rms wfe to be put randomly on the SM
    :return: 2x float, E2E and matrix contrast
    """
    import hicat.simulators

    # Keep track of time
    start_time = time.time()  # runtime currently is around 12 min

    # Parameters
    nb_seg = CONFIG_INI.getint('HiCAT', 'nb_subapertures')
    iwa = CONFIG_INI.getfloat('HiCAT', 'IWA')
    owa = CONFIG_INI.getfloat('HiCAT', 'OWA')

    # Import numerical PASTIS matrix for HiCAT sim
    filename = 'PASTISmatrix_num_HiCAT_piston_Noll1'
    matrix_pastis = fits.getdata(os.path.join(matrix_dir, filename + '.fits'))

    # Create random aberration coefficients
    aber = np.random.random([nb_seg])  # piston values in input units
    print('PISTON ABERRATIONS:', aber)

    # Normalize to the RMS value I want
    rms_init = util.rms(aber)
    aber *= rms.value / rms_init
    calc_rms = util.rms(aber) * u.nm
    aber *= u.nm  # making sure the aberration has the correct units
    print("Calculated RMS:", calc_rms)

    # Remove global piston
    aber -= np.mean(aber)

    ### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
    print('Generating baseline PSF from E2E - no coronagraph, no aberrations')
    hc = hicat.simulators.hicat_sim.HICAT_Sim()
    hc.iris_ao = 'iris_ao'
    hc.apodizer = 'cnt1_apodizer'
    hc.lyot_stop = 'cnt1_apodizer_lyot_stop'
    hc.include_fpm = False

    psf_perfect = hc.calc_psf(display=False, return_intermediates=False)
    normp = np.max(psf_perfect[0].data)
    # psf_perfect = psf_perfect[0].data / normp   # we don't actually need the normalized perfect PSF itself

    ### HiCAT sim
    start_e2e = time.time()
    # Set up the HiCAT simulator, get PSF
    hc.apodizer = 'cnt1_apodizer'
    hc.lyot_stop = 'cnt1_apodizer_lyot_stop'
    hc.include_fpm = True

    # Calculate coro PSF without aberrations
    psf_coro = hc.calc_psf(display=False, return_intermediates=False)
    psf_coro = psf_coro[0].data / normp

    print('Calculating E2E contrast...')
    # Put aberration on Iris AO
    for nseg in range(nb_seg):
        hc.iris_dm.set_actuator(nseg + 1, aber[nseg], 0, 0)

    psf_hicat = hc.calc_psf(display=False, return_intermediates=False)
    psf_hicat = psf_hicat[0].data / normp

    # Create DH
    dh_mask = util.create_dark_hole(psf_hicat, iwa=iwa, owa=owa, samp=13 / 4)
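
    # Rough sketch of what util.create_dark_hole() is assumed to do (an assumption, for reference only):
    # build a boolean annulus centered on the image, with inner/outer radii of iwa*samp and owa*samp pixels.
    #     def create_dark_hole_sketch(img, iwa, owa, samp):
    #         y, x = np.indices(img.shape)
    #         r = np.hypot(x - img.shape[1] / 2, y - img.shape[0] / 2)
    #         return (r >= iwa * samp) & (r <= owa * samp)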
    # Get the mean contrast
    hicat_dh_psf = psf_hicat * dh_mask
    contrast_hicat = np.mean(hicat_dh_psf[np.where(hicat_dh_psf != 0)])
    end_e2e = time.time()

    ###
    # Calculate baseline contrast
    baseline_dh = psf_coro * dh_mask
    contrast_base = np.mean(baseline_dh[np.where(baseline_dh != 0)])

    ## MATRIX PASTIS
    print('Generating contrast from matrix-PASTIS')
    start_matrixpastis = time.time()
    # Get mean contrast from matrix PASTIS
    contrast_matrix = util.pastis_contrast(
        aber, matrix_pastis
    ) + contrast_base  # calculating contrast with PASTIS matrix model
    end_matrixpastis = time.time()

    ## Outputs
    print('\n--- CONTRASTS: ---')
    print('Mean contrast from E2E:', contrast_hicat)
    print('Contrast from matrix PASTIS:', contrast_matrix)

    print('\n--- RUNTIMES: ---')
    print('E2E: ', end_e2e - start_e2e, 'sec =', (end_e2e - start_e2e) / 60,
          'min')
    print('Matrix PASTIS: ', end_matrixpastis - start_matrixpastis, 'sec =',
          (end_matrixpastis - start_matrixpastis) / 60, 'min')

    end_time = time.time()
    runtime = end_time - start_time
    print('Runtime for contrast_calculation_simple.py: {} sec = {} min'.format(
        runtime, runtime / 60))

    return contrast_hicat, contrast_matrix
def contrast_jwst_ana_num(matdir,
                          matrix_mode="analytical",
                          rms=1. * u.nm,
                          im_pastis=False,
                          plotting=False):
    """
    Calculate the contrast for an RMS WFE with image PASTIS, matrix PASTIS
    :param matdir: data directory to use for matrix and calibration coefficients from
    :param matrix_mode: use 'analytical or 'numerical' matrix
    :param rms: RMS wavefront error in pupil to calculate contrast for; in NANOMETERS
    :param im_pastis: default False, whether to also calculate contrast from image PASTIS
    :param plotting: default False, whether to save E2E and PASTIS DH PSFs; works only if im_pastis=True
    :return:
    """
    from e2e_simulators import webbpsf_imaging as webbim

    print("THIS ONLY WORKS FOR PISTON FOR NOW")

    # Keep track of time
    start_time = time.time()  # runtime currently is around 12 min

    # Parameters
    dataDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), matdir)
    which_tel = CONFIG_INI.get('telescope', 'name')
    nb_seg = CONFIG_INI.getint(which_tel, 'nb_subapertures')
    filter = CONFIG_INI.get(which_tel, 'filter_name')
    fpm = CONFIG_INI.get(which_tel, 'focal_plane_mask')  # focal plane mask
    lyot_stop = CONFIG_INI.get(which_tel, 'pupil_plane_stop')  # Lyot stop
    inner_wa = CONFIG_INI.getint(which_tel, 'IWA')
    outer_wa = CONFIG_INI.getint(which_tel, 'OWA')
    tel_size_px = CONFIG_INI.getint('numerical', 'tel_size_px')
    sampling = CONFIG_INI.getfloat('numerical', 'sampling')
    #real_samp = sampling * tel_size_px / im_size
    zern_number = CONFIG_INI.getint('calibration', 'zernike')
    zern_mode = util.ZernikeMode(zern_number)
    zern_max = CONFIG_INI.getint('zernikes', 'max_zern')

    # Import PASTIS matrix
    matrix_pastis = None
    if matrix_mode == 'numerical':
        filename = 'PASTISmatrix_num_' + zern_mode.name + '_' + zern_mode.convention + str(
            zern_mode.index)
        matrix_pastis = fits.getdata(
            os.path.join(dataDir, 'matrix_numerical', filename + '.fits'))
    elif matrix_mode == 'analytical':
        filename = 'PASTISmatrix_' + zern_mode.name + '_' + zern_mode.convention + str(
            zern_mode.index)
        matrix_pastis = fits.getdata(
            os.path.join(dataDir, 'matrix_analytical', filename + '.fits'))

    # Create random aberration coefficients
    aber = np.random.random([nb_seg])  # piston values in input units
    #print('PISTON ABERRATIONS:', aber)

    # Normalize to the RMS value I want
    rms_init = util.rms(aber)
    aber *= rms.value / rms_init
    calc_rms = util.rms(aber) * u.nm
    aber *= u.nm  # making sure the aberration has the correct units
    print("Calculated RMS:", calc_rms)

    # Remove global piston
    aber -= np.mean(aber)

    # Make equivalent aberration array that goes into the WebbPSF function
    Aber_WSS = np.zeros([nb_seg, zern_max])
    Aber_WSS[:, 0] = aber.to(
        u.m
    ).value  # index "0" works because we're using piston currently; convert to meters

    ### BASELINE PSF - NO ABERRATIONS, NO CORONAGRAPH
    print('Generating baseline PSF from E2E - no coronagraph, no aberrations')
    psf_perfect = webbim.nircam_nocoro(filter, np.zeros_like(Aber_WSS))
    normp = np.max(psf_perfect)
    psf_perfect = psf_perfect / normp

    ### WEBBPSF
    print('Generating E2E coro contrast')
    start_webb = time.time()
    # Set up NIRCam and coronagraph, get PSF
    psf_webbpsf = webbim.nircam_coro(filter, fpm, lyot_stop, Aber_WSS)
    psf_webbpsf = psf_webbpsf / normp
    # Create dark hole
    dh_area = util.create_dark_hole(psf_webbpsf, inner_wa, outer_wa, sampling)
    # Get the mean contrast from the WebbPSF coronagraph
    webb_dh_psf = psf_webbpsf * dh_area
    contrast_webbpsf = np.mean(webb_dh_psf[np.where(webb_dh_psf != 0)])
    end_webb = time.time()

    #TODO: save plots of phase on segmented pupil

    # Load in baseline contrast
    contrastname = 'base-contrast_' + zern_mode.name + '_' + zern_mode.convention + str(
        zern_mode.index)
    contrast_base = float(
        np.loadtxt(os.path.join(dataDir, 'calibration',
                                contrastname + '.txt')))

    ### IMAGE PASTIS
    contrast_am = np.nan
    if im_pastis:
        print('Generating contrast from image-PASTIS')
        start_impastis = time.time()
        # Create calibrated image from analytical model
        psf_am, full_psf = impastis.analytical_model(zern_number,
                                                     aber,
                                                     cali=True)
        # Get the mean contrast from image PASTIS
        contrast_am = np.mean(psf_am[np.where(psf_am != 0)]) + contrast_base
        end_impastis = time.time()

    ### MATRIX PASTIS
    print('Generating contrast from matrix-PASTIS')
    start_matrixpastis = time.time()
    # Get mean contrast from matrix PASTIS
    contrast_matrix = util.pastis_contrast(
        aber, matrix_pastis
    ) + contrast_base  # calculating contrast with PASTIS matrix model
    end_matrixpastis = time.time()

    ratio = None
    if im_pastis:
        ratio = contrast_am / contrast_matrix

    # Outputs
    print('\n--- CONTRASTS: ---')
    print('Mean contrast from E2E:', contrast_webbpsf)
    print('Mean contrast with image PASTIS:', contrast_am)
    print('Contrast from matrix PASTIS:', contrast_matrix)
    print('Ratio image PASTIS / matrix PASTIS:', ratio)

    print('\n--- RUNTIMES: ---')
    print('E2E: ', end_webb - start_webb, 'sec =',
          (end_webb - start_webb) / 60, 'min')
    if im_pastis:
        print('Image PASTIS: ', end_impastis - start_impastis, 'sec =',
              (end_impastis - start_impastis) / 60, 'min')
    print('Matrix PASTIS: ', end_matrixpastis - start_matrixpastis, 'sec =',
          (end_matrixpastis - start_matrixpastis) / 60, 'min')

    end_time = time.time()
    runtime = end_time - start_time
    print('Runtime for contrast_calculation_simple.py: {} sec = {} min'.format(
        runtime, runtime / 60))

    # Save the PSFs
    if im_pastis:
        if plotting:

            # As fits files
            util.write_fits(
                util.zoom_cen(webb_dh_psf, psf_am.shape[0] / 2),
                os.path.join(
                    dataDir, 'results', 'dh_images_' + matrix_mode,
                    '{:.2e}'.format(rms.value) + str(rms.unit) +
                    'RMS_e2e.fits'))
            util.write_fits(
                psf_am,
                os.path.join(
                    dataDir, 'results', 'dh_images_' + matrix_mode,
                    '{:.2e}'.format(rms.value) + str(rms.unit) +
                    'RMS_am.fits'))

            # As PDF plot
            plt.clf()
            plt.figure()
            plt.suptitle('{:.2e}'.format(rms.value) + str(rms.unit) + " RMS")
            plt.subplot(1, 2, 1)
            plt.title("E2E")
            plt.imshow(util.zoom_cen(webb_dh_psf, psf_am.shape[0] / 2),
                       norm=LogNorm())
            plt.colorbar()
            plt.subplot(1, 2, 2)
            plt.title("PASTIS image")
            plt.imshow(psf_am, norm=LogNorm())
            plt.colorbar()
            plt.savefig(
                os.path.join(dataDir, 'results', 'dh_images_' + matrix_mode,
                             '{:.2e}'.format(rms.value) + 'DH_PSFs.pdf'))
            #TODO: check image rotation, I think there is a 90 degree difference in them for the JWST simulations

    return contrast_webbpsf, contrast_am, contrast_matrix
Example #5
def num_matrix_jwst():
    """
    Generate a numerical PASTIS matrix for a JWST coronagraph.

    All inputs are read from the (local) configfile and saved to the specified output directory.
    """

    import webbpsf
    from e2e_simulators import webbpsf_imaging as webbim

    # Keep track of time
    start_time = time.time()  # runtime is currently around 21 minutes
    print('Building numerical matrix for JWST\n')

    # Parameters
    resDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), 'active',
                          'matrix_numerical')
    which_tel = CONFIG_INI.get('telescope', 'name')
    nb_seg = CONFIG_INI.getint(which_tel, 'nb_subapertures')
    im_size_e2e = CONFIG_INI.getint('numerical', 'im_size_px_webbpsf')
    inner_wa = CONFIG_INI.getint(which_tel, 'IWA')
    outer_wa = CONFIG_INI.getint(which_tel, 'OWA')
    sampling = CONFIG_INI.getfloat('numerical', 'sampling')
    fpm = CONFIG_INI.get(which_tel, 'focal_plane_mask')  # focal plane mask
    lyot_stop = CONFIG_INI.get(which_tel, 'pupil_plane_stop')  # Lyot stop
    filter = CONFIG_INI.get(which_tel, 'filter_name')
    nm_aber = CONFIG_INI.getfloat('calibration', 'single_aberration') * u.nm
    wss_segs = webbpsf.constants.SEGNAMES_WSS_ORDER
    zern_max = CONFIG_INI.getint('zernikes', 'max_zern')
    zern_number = CONFIG_INI.getint('calibration', 'zernike')
    zern_mode = util.ZernikeMode(
        zern_number)  # Create Zernike mode object for easier handling
    wss_zern_nb = util.noll_to_wss(
        zern_number)  # Convert from Noll to WSS framework

    # If subfolder "matrix_numerical" doesn't exist yet, create it.
    if not os.path.isdir(resDir):
        os.mkdir(resDir)

    # If subfolder "OTE_images" doesn't exist yet, create it.
    if not os.path.isdir(os.path.join(resDir, 'OTE_images')):
        os.mkdir(os.path.join(resDir, 'OTE_images'))

    # If subfolder "psfs" doesn't exist yet, create it.
    if not os.path.isdir(os.path.join(resDir, 'psfs')):
        os.mkdir(os.path.join(resDir, 'psfs'))

    # If subfolder "darkholes" doesn't exist yet, create it.
    if not os.path.isdir(os.path.join(resDir, 'darkholes')):
        os.mkdir(os.path.join(resDir, 'darkholes'))

    # Create the dark hole mask.
    pup_im = np.zeros([im_size_e2e, im_size_e2e
                       ])  # this is just used for DH mask generation
    dh_area = util.create_dark_hole(pup_im, inner_wa, outer_wa, sampling)

    # Create a direct WebbPSF image for normalization factor
    fake_aber = np.zeros([nb_seg, zern_max])
    psf_perfect = webbim.nircam_nocoro(filter, fake_aber)
    normp = np.max(psf_perfect)
    psf_perfect = psf_perfect / normp

    # Set up NIRCam coro object from WebbPSF
    nc_coro = webbpsf.NIRCam()
    nc_coro.filter = filter
    nc_coro.image_mask = fpm
    nc_coro.pupil_mask = lyot_stop

    # Null the OTE OPDs for the PSFs, maybe we will add internal WFE later.
    nc_coro, ote_coro = webbpsf.enable_adjustable_ote(
        nc_coro)  # create OTE for coronagraph
    nc_coro.include_si_wfe = False  # set SI internal WFE to zero

    #-# Generating the PASTIS matrix and a list for all contrasts
    matrix_direct = np.zeros([nb_seg, nb_seg])  # Generate empty matrix
    all_psfs = []
    all_dhs = []
    all_contrasts = []

    print('nm_aber: {}'.format(nm_aber))

    for i in range(nb_seg):
        for j in range(nb_seg):

            print('\nSTEP: {}-{} / {}-{}'.format(i + 1, j + 1, nb_seg, nb_seg))

            # Get the names of the segments; they are addressed by name in the OTE functions.
            seg_i = wss_segs[i].split('-')[0]
            seg_j = wss_segs[j].split('-')[0]

            # Put the aberration on the correct segments
            Aber_WSS = np.zeros([
                nb_seg, zern_max
            ])  # The Zernikes here will be filled in the WSS order!!!
            # Because it goes into _apply_hexikes_to_seg().
            Aber_WSS[i, wss_zern_nb - 1] = nm_aber.to(
                u.m
            ).value  # Aberration on the segment we're currently working on;
            # convert to meters; -1 on the Zernike because Python starts
            # numbering at 0.
            Aber_WSS[j, wss_zern_nb - 1] = nm_aber.to(
                u.m).value  # same for other segment

            # Putting aberrations on segments i and j
            ote_coro.reset(
            )  # Making sure there are no previous movements on the segments.
            ote_coro.zero()  # set OTE for coronagraph to zero

            # Apply both aberrations to OTE. If i=j, apply only once!
            ote_coro._apply_hexikes_to_seg(seg_i, Aber_WSS[
                i, :])  # set segment i  (segment numbering starts at 1)
            if i != j:
                ote_coro._apply_hexikes_to_seg(seg_j,
                                               Aber_WSS[j, :])  # set segment j

            # If you want to display it:
            # ote_coro.display_opd()
            # plt.show()

            # Save OPD images for testing
            opd_name = 'opd_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            plt.clf()
            ote_coro.display_opd()
            plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf'))

            print('Calculating WebbPSF image')
            image = nc_coro.calc_psf(fov_pixels=int(im_size_e2e),
                                     oversample=1,
                                     nlambda=1)
            psf = image[0].data / normp

            # Save WebbPSF image to disk
            filename_psf = 'psf_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            util.write_fits(psf,
                            os.path.join(resDir, 'psfs',
                                         filename_psf + '.fits'),
                            header=None,
                            metadata=None)
            all_psfs.append(psf)

            print('Calculating mean contrast in dark hole')
            dh_intensity = psf * dh_area
            contrast = np.mean(dh_intensity[np.where(dh_intensity != 0)])
            print('contrast:', contrast)

            # Save DH image to disk and put current contrast in list
            filename_dh = 'dh_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            util.write_fits(dh_intensity,
                            os.path.join(resDir, 'darkholes',
                                         filename_dh + '.fits'),
                            header=None,
                            metadata=None)
            all_dhs.append(dh_intensity)
            all_contrasts.append(contrast)

            # Fill according entry in the matrix
            matrix_direct[i, j] = contrast

    # Transform saved lists to arrays
    all_psfs = np.array(all_psfs)
    all_dhs = np.array(all_dhs)
    all_contrasts = np.array(all_contrasts)

    # Filling the off-axis elements
    matrix_two_N = np.copy(
        matrix_direct
    )  # This is just an intermediary copy so that I don't mix things up.
    matrix_pastis = np.copy(
        matrix_direct)  # This will be the final PASTIS matrix.

    for i in range(nb_seg):
        for j in range(nb_seg):
            if i != j:
                matrix_off_val = (matrix_two_N[i, j] - matrix_two_N[i, i] -
                                  matrix_two_N[j, j]) / 2.
                matrix_pastis[i, j] = matrix_off_val
                print('Off-axis for i{}-j{}: {}'.format(
                    i + 1, j + 1, matrix_off_val))

    # Normalize matrix for the input aberration
    matrix_pastis /= np.square(nm_aber.value)

    # Save matrix to file
    filename_matrix = 'PASTISmatrix_num_' + zern_mode.name + '_' + zern_mode.convention + str(
        zern_mode.index)
    util.write_fits(matrix_pastis,
                    os.path.join(resDir, filename_matrix + '.fits'),
                    header=None,
                    metadata=None)
    print('Matrix saved to:', os.path.join(resDir, filename_matrix + '.fits'))

    # Save the PSF and DH image *cubes* as well (as opposed to each one individually)
    util.write_fits(all_psfs,
                    os.path.join(resDir, 'psfs', 'psf_cube' + '.fits'),
                    header=None,
                    metadata=None)
    util.write_fits(all_dhs,
                    os.path.join(resDir, 'darkholes', 'dh_cube' + '.fits'),
                    header=None,
                    metadata=None)
    np.savetxt(os.path.join(resDir, 'contrasts.txt'), all_contrasts, fmt='%e')

    # Tell us how long it took to finish.
    end_time = time.time()
    print('Runtime for matrix_building.py:', end_time - start_time, 'sec =',
          (end_time - start_time) / 60, 'min')
    print('Data saved to {}'.format(resDir))
Example #6
import util_pastis as util
import image_pastis as impastis

if __name__ == '__main__':

    # Keep track of time
    start_time = time.time()  # runtime currently is around 3 minutes

    # Parameters
    outDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), 'active',
                          'calibration')
    telescope = CONFIG_INI.get('telescope', 'name')
    fpm = CONFIG_INI.get(telescope, 'focal_plane_mask')  # focal plane mask
    lyot_stop = CONFIG_INI.get(telescope, 'pupil_plane_stop')  # Lyot stop
    filter = CONFIG_INI.get(telescope, 'filter_name')
    tel_size_px = CONFIG_INI.getint('numerical', 'tel_size_px')
    im_size_e2e = CONFIG_INI.getint('numerical', 'im_size_px_webbpsf')
    size_seg = CONFIG_INI.getint('numerical', 'size_seg')
    nb_seg = CONFIG_INI.getint(telescope, 'nb_subapertures')
    wss_segs = webbpsf.constants.SEGNAMES_WSS_ORDER
    zern_max = CONFIG_INI.getint('zernikes', 'max_zern')
    inner_wa = CONFIG_INI.getint(telescope, 'IWA')
    outer_wa = CONFIG_INI.getint(telescope, 'OWA')
    sampling = CONFIG_INI.getfloat('numerical', 'sampling')

    if telescope == 'JWST':
        # Setting to ensure that PyCharm finds the webbpsf-data folder. If you don't know where it is, find it with:
        # webbpsf.utils.get_webbpsf_data_path()
        # --> e.g.: >>source activate astroconda   >>ipython   >>import webbpsf   >>webbpsf.utils.get_webbpsf_data_path()
        os.environ['WEBBPSF_PATH'] = CONFIG_INI.get('local',
                                                    'webbpsf_data_path')
Example #7
def analytical_model(zernike_pol, coef, cali=False):
    """

    :param zernike_pol:
    :param coef:
    :param cali: bool; True if we already have calibration coefficients to use. False if we still need to create them.
    :return:
    """

    #-# Parameters
    dataDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'),
                           'active')
    telescope = CONFIG_INI.get('telescope', 'name')
    nb_seg = CONFIG_INI.getint(telescope, 'nb_subapertures')
    tel_size_m = CONFIG_INI.getfloat(telescope, 'diameter') * u.m
    real_size_seg = CONFIG_INI.getfloat(
        telescope, 'flat_to_flat'
    )  # size in meters of an individual segment, flat to flat
    size_seg = CONFIG_INI.getint(
        'numerical',
        'size_seg')  # pixel size of an individual segment tip to tip
    wvln = CONFIG_INI.getint(telescope, 'lambda') * u.nm
    inner_wa = CONFIG_INI.getint(telescope, 'IWA')
    outer_wa = CONFIG_INI.getint(telescope, 'OWA')
    tel_size_px = CONFIG_INI.getint(
        'numerical', 'tel_size_px')  # pupil diameter of telescope in pixels
    im_size_pastis = CONFIG_INI.getint(
        'numerical', 'im_size_px_pastis')  # image array size in px
    sampling = CONFIG_INI.getfloat('numerical', 'sampling')  # sampling
    size_px_tel = tel_size_m / tel_size_px  # size of one pixel in pupil plane in m
    px_sq_to_rad = (size_px_tel * np.pi / tel_size_m) * u.rad
    zern_max = CONFIG_INI.getint('zernikes', 'max_zern')
    sz = CONFIG_INI.getint('numerical', 'im_size_lamD_hcipy')

    # Create Zernike mode object for easier handling
    zern_mode = util.ZernikeMode(zernike_pol)

    #-# Mean subtraction for piston
    if zernike_pol == 1:
        coef -= np.mean(coef)

    #-# Generic segment shapes

    if telescope == 'JWST':
        # Load pupil from file
        pupil = fits.getdata(
            os.path.join(dataDir, 'segmentation', 'pupil.fits'))

        # Put pupil in randomly picked, slightly larger image array
        pup_im = np.copy(pupil)  # remove if lines below this are active
        #pup_im = np.zeros([tel_size_px, tel_size_px])
        #lim = int((pup_im.shape[1] - pupil.shape[1])/2.)
        #pup_im[lim:-lim, lim:-lim] = pupil
        # test_seg = pupil[394:,197:315]    # this is just so that I can display an individual segment when the pupil is 512
        # test_seg = pupil[:203,392:631]    # ... when the pupil is 1024
        # one_seg = np.zeros_like(test_seg)
        # one_seg[:110, :] = test_seg[8:, :]    # this is the centered version of the individual segment for 512 px pupil

        # Create a mini-segment (one individual segment from the segmented aperture)
        mini_seg_real = poppy.NgonAperture(
            name='mini', radius=real_size_seg
        )  # creating real mini segment shape with poppy
        #test = mini_seg_real.sample(wavelength=wvln, grid_size=flat_diam, return_scale=True)   # fix its sampling with wavelength
        mini_hdu = mini_seg_real.to_fits(wavelength=wvln,
                                         npix=size_seg)  # make it a fits file
        mini_seg = mini_hdu[
            0].data  # extract the image data from the fits file

    elif telescope == 'ATLAST':
        # Create mini-segment
        pupil_grid = hcipy.make_pupil_grid(dims=tel_size_px,
                                           diameter=real_size_seg)
        focal_grid = hcipy.make_focal_grid(
            pupil_grid, sampling, sz, wavelength=wvln.to(
                u.m).value)  # fov = lambda/D radius of total image
        prop = hcipy.FraunhoferPropagator(pupil_grid, focal_grid)

        mini_seg_real = hcipy.hexagonal_aperture(circum_diameter=real_size_seg,
                                                 angle=np.pi / 2)
        mini_seg_hc = hcipy.evaluate_supersampled(
            mini_seg_real, pupil_grid, 4
        )  # the supersampling number doesn't really matter in context with the other numbers
        mini_seg = mini_seg_hc.shaped  # make it a 2D array

        # Redefine size_seg if using HCIPy
        size_seg = mini_seg.shape[0]

        # Make stand-in pupil for DH array
        pupil = fits.getdata(
            os.path.join(dataDir, 'segmentation', 'pupil.fits'))
        pup_im = np.copy(pupil)

    #-# Generate a dark hole mask
    #TODO: simplify DH generation and usage
    dh_area = util.create_dark_hole(
        pup_im, inner_wa, outer_wa, sampling
    )  # this might become a problem if the pupil size is not the same as the pastis image size; fine for now though
    if telescope == 'ATLAST':
        dh_sz = util.zoom_cen(dh_area, sz * sampling)

    #-# Import information from the segmentation script
    Projection_Matrix = fits.getdata(
        os.path.join(dataDir, 'segmentation', 'Projection_Matrix.fits'))
    vec_list = fits.getdata(
        os.path.join(dataDir, 'segmentation', 'vec_list.fits'))  # in pixels
    NR_pairs_list = fits.getdata(
        os.path.join(dataDir, 'segmentation', 'NR_pairs_list_int.fits'))

    # Figure out how many NRPs we're dealing with
    NR_pairs_nb = NR_pairs_list.shape[0]

    #-# Choose whether to use calibration factors for the calibration
    if cali:
        filename = 'calibration_' + zern_mode.name + '_' + zern_mode.convention + str(
            zern_mode.index)
        ck = fits.getdata(
            os.path.join(dataDir, 'calibration', filename + '.fits'))
    else:
        ck = np.ones(nb_seg)

    coef = coef * ck

    #-# Generic coefficients
    # the coefficients in front of the non redundant pairs, the A_q in eq. 13 in Leboulleux et al. 2018
    generic_coef = np.zeros(
        NR_pairs_nb
    ) * u.nm * u.nm  # setting it up with the correct units this will have

    for q in range(NR_pairs_nb):
        for i in range(nb_seg):
            for j in range(i + 1, nb_seg):
                if Projection_Matrix[i, j, 0] == q + 1:
                    generic_coef[q] += coef[i] * coef[j]

    #-# Constant sum and cosine sum - calculating eq. 13 from Leboulleux et al. 2018
    if telescope == 'JWST':
        i_line = np.linspace(-im_size_pastis / 2., im_size_pastis / 2.,
                             im_size_pastis)
        tab_i, tab_j = np.meshgrid(i_line, i_line)
        cos_u_mat = np.zeros(
            (int(im_size_pastis), int(im_size_pastis), NR_pairs_nb))
    elif telescope == 'ATLAST':
        i_line = np.linspace(-(2 * sz * sampling) / 2.,
                             (2 * sz * sampling) / 2., (2 * sz * sampling))
        tab_i, tab_j = np.meshgrid(i_line, i_line)
        cos_u_mat = np.zeros((int((2 * sz * sampling)), int(
            (2 * sz * sampling)), NR_pairs_nb))

    # Calculating the cosine terms from eq. 13.
    # The -1 with each NR_pairs_list is because the segment names are saved starting from 1, but Python starts
    # its indexing at zero, so we have to make it start at zero here too.
    for q in range(NR_pairs_nb):
        # cos(b_q <dot> u): b_q with 1 <= q <= NR_pairs_nb is the basis of NRPS, meaning the distance vectors between
        #                   two segments of one NRP. We can read these out from vec_list.
        #                   u is the position (vector) in the detector plane. Here, those are the grids tab_i and tab_j.
        # We need to calculate the dot product between all b_q and u, so in each iteration (for q), we simply add the
        # x and y component.
        cos_u_mat[:, :, q] = np.cos(
            px_sq_to_rad *
            (vec_list[NR_pairs_list[q, 0] - 1, NR_pairs_list[q, 1] - 1, 0] *
             tab_i) + px_sq_to_rad *
            (vec_list[NR_pairs_list[q, 0] - 1, NR_pairs_list[q, 1] - 1, 1] *
             tab_j)) * u.dimensionless_unscaled

    sum1 = np.sum(
        coef**2
    )  # sum of all a_{k,l} in eq. 13 - this works only for single Zernikes (l fixed), because np.sum would sum over l too, which would be wrong.
    if telescope == 'JWST':
        sum2 = np.zeros(
            (int(im_size_pastis), int(im_size_pastis))
        ) * u.nm * u.nm  # setting it up with the correct units this will have
    elif telescope == 'ATLAST':
        sum2 = np.zeros(
            (int(2 * sz * sampling), int(2 * sz * sampling))) * u.nm * u.nm

    for q in range(NR_pairs_nb):
        sum2 = sum2 + generic_coef[q] * cos_u_mat[:, :, q]

    #-# Local Zernike
    if telescope == 'JWST':
        # Generate a basis of Zernikes with the mini segment being the support
        isolated_zerns = zern.hexike_basis(nterms=zern_max,
                                           npix=size_seg,
                                           rho=None,
                                           theta=None,
                                           vertical=False,
                                           outside=0.0)

        # Calculate the Zernike that is currently being used and put it on one single subaperture, the result is Zer
        # Apply the currently used Zernike to the mini-segment.
        if zernike_pol == 1:
            Zer = np.copy(mini_seg)
        elif zernike_pol in range(2, zern_max - 2):
            Zer = np.copy(mini_seg)
            Zer = Zer * isolated_zerns[zernike_pol - 1]

        # Fourier Transform of the Zernike - the global envelope
        mf = mft.MatrixFourierTransform()
        ft_zern = mf.perform(Zer, im_size_pastis / sampling, im_size_pastis)

    elif telescope == 'ATLAST':
        isolated_zerns = hcipy.make_zernike_basis(num_modes=zern_max,
                                                  D=real_size_seg,
                                                  grid=pupil_grid,
                                                  radial_cutoff=False)
        Zer = hcipy.Wavefront(mini_seg_hc * isolated_zerns[zernike_pol - 1],
                              wavelength=wvln.to(u.m).value)

        # Fourier transform the Zernike
        ft_zern = prop(Zer)
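
    # Putting it together (eq. 13 in Leboulleux et al. 2018): the focal-plane intensity is modelled as
    #     I(u) = |FT(Zernike)(u)|^2 * ( sum_k a_k^2  +  2 * sum_q A_q * cos(b_q . u) ),
    # where sum1 holds the first sum, sum2 the cosine sum weighted by the generic coefficients A_q,
    # and ft_zern is the Fourier transform of the single-segment Zernike (the global envelope).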

    #-# Final image
    if telescope == 'JWST':
        # Generating the final image that will get passed on to the outer scope, I(u) in eq. 13
        intensity = np.abs(ft_zern)**2 * (sum1.value + 2. * sum2.value)
    elif telescope == 'ATLAST':
        intensity = ft_zern.intensity.shaped * (sum1.value + 2. * sum2.value)

    # PASTIS is only valid inside the dark hole, so we cut out only that part
    if telescope == 'JWST':
        tot_dh_im_size = sampling * (outer_wa + 3)
        intensity_zoom = util.zoom_cen(
            intensity, tot_dh_im_size
        )  # zoom box is (owa + 3*lambda/D) wide, in terms of lambda/D
        dh_area_zoom = util.zoom_cen(dh_area, tot_dh_im_size)

        dh_psf = dh_area_zoom * intensity_zoom

    elif telescope == 'ATLAST':
        dh_psf = dh_sz * intensity
    """
    # Create plots.
    plt.subplot(1, 3, 1)
    plt.imshow(pupil, origin='lower')
    plt.title('JWST pupil and diameter definition')
    plt.plot([46.5, 464.5], [101.5, 409.5], 'r-')   # show how the diagonal of the pupil is defined

    plt.subplot(1, 3, 2)
    plt.imshow(mini_seg, origin='lower')
    plt.title('JWST individual mini-segment')

    plt.subplot(1, 3, 3)
    plt.imshow(dh_psf, origin='lower')
    plt.title('JWST dark hole')
    plt.show()
    """

    # dh_psf is the image of the dark hole only, the pixels outside of it are zero
    # intensity is the entire final image
    return dh_psf, intensity
Example #8
def num_matrix_luvoir(design):
    """
    Generate a numerical PASTIS matrix for a LUVOIR A coronagraph.

    All inputs are read from the (local) configfile and saved to the specified output directory.
    The LUVOIR STDT delivery in May 2018 included three different apodizers we can work with,
    so I will implement an easy way of making a choice between them: 'small', 'medium' and 'large'.
    """

    # Keep track of time
    start_time = time.time()  # runtime is currently around 150 minutes
    print('Building numerical matrix for LUVOIR\n')

    ### Parameters

    # System parameters
    resDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), 'active',
                          'matrix_numerical')
    zern_number = CONFIG_INI.getint('calibration', 'zernike')
    zern_mode = util.ZernikeMode(
        zern_number)  # Create Zernike mode object for easier handling

    # General telescope parameters
    nb_seg = CONFIG_INI.getint('LUVOIR', 'nb_subapertures')
    wvln = CONFIG_INI.getfloat('LUVOIR', 'lambda') * 1e-9  # m
    diam = CONFIG_INI.getfloat('LUVOIR', 'diameter')  # m
    nm_aber = CONFIG_INI.getfloat('calibration',
                                  'single_aberration') * 1e-9  # m

    # Image system parameters
    im_lamD = 30  # image size in lambda/D
    sampling = 4

    # Print some of the defined parameters
    print('LUVOIR apodizer design: {}'.format(design))
    print()
    print('Wavelength: {} m'.format(wvln))
    print('Telescope diameter: {} m'.format(diam))
    print('Number of segments: {}'.format(nb_seg))
    print()
    print('Image size: {} lambda/D'.format(im_lamD))
    print('Sampling: {} px per lambda/D'.format(sampling))

    ### Setting up the paths

    # If subfolder "matrix_numerical" doesn't exist yet, create it.
    if not os.path.isdir(resDir):
        os.mkdir(resDir)

    # If subfolder "OTE_images" doesn't exist yet, create it.
    if not os.path.isdir(os.path.join(resDir, 'OTE_images')):
        os.mkdir(os.path.join(resDir, 'OTE_images'))

    # If subfolder "psfs" doesn't exist yet, create it.
    if not os.path.isdir(os.path.join(resDir, 'psfs')):
        os.mkdir(os.path.join(resDir, 'psfs'))

    ### Instantiate Luvoir telescope with chosen apodizer design
    optics_input = '/Users/ilaginja/Documents/LabWork/ultra/LUVOIR_delivery_May2019/'
    luvoir = LuvoirAPLC(optics_input, design, sampling)

    ### Dark hole mask
    dh_outer = hc.circular_aperture(2 * luvoir.apod_dict[design]['owa'] *
                                    luvoir.lam_over_d)(luvoir.focal_det)
    dh_inner = hc.circular_aperture(2 * luvoir.apod_dict[design]['iwa'] *
                                    luvoir.lam_over_d)(luvoir.focal_det)
    dh_mask = (dh_outer - dh_inner).astype('bool')

    ### Reference images for contrast normalization and coronagraph floor
    unaberrated_coro_psf, ref = luvoir.calc_psf(ref=True,
                                                display_intermediate=False,
                                                return_intermediate=False)
    norm = np.max(ref)

    dh_intensity = unaberrated_coro_psf / norm * dh_mask
    contrast_floor = np.mean(dh_intensity[np.where(dh_intensity != 0)])
    print(contrast_floor)

    ### Generating the PASTIS matrix and a list for all contrasts
    matrix_direct = np.zeros([nb_seg, nb_seg])  # Generate empty matrix
    all_psfs = []
    all_contrasts = []

    print('nm_aber: {} m'.format(nm_aber))

    for i in range(nb_seg):
        for j in range(nb_seg):

            print('\nSTEP: {}-{} / {}-{}'.format(i + 1, j + 1, nb_seg, nb_seg))

            # Put aberration on correct segments. If i=j, apply only once!
            luvoir.flatten()
            luvoir.set_segment(i + 1, nm_aber / 2, 0, 0)
            if i != j:
                luvoir.set_segment(j + 1, nm_aber / 2, 0, 0)

            print('Calculating coro image...')
            image, inter = luvoir.calc_psf(ref=False,
                                           display_intermediate=False,
                                           return_intermediate='intensity')
            # Normalize PSF by reference image
            psf = image / norm

            # Save image to disk
            filename_psf = 'psf_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            hc.write_fits(psf,
                          os.path.join(resDir, 'psfs', filename_psf + '.fits'))
            all_psfs.append(psf)

            # Save OPD images for testing (are these actually surface images, not OPD?)
            opd_name = 'opd_' + zern_mode.name + '_' + zern_mode.convention + str(
                zern_mode.index) + '_segs_' + str(i + 1) + '-' + str(j + 1)
            plt.clf()
            hc.imshow_field(inter['seg_mirror'],
                            mask=luvoir.aperture,
                            cmap='RdBu')
            plt.savefig(os.path.join(resDir, 'OTE_images', opd_name + '.pdf'))

            print('Calculating mean contrast in dark hole')
            dh_intensity = psf * dh_mask
            contrast = np.mean(dh_intensity[np.where(dh_intensity != 0)])
            print('contrast:', contrast)
            all_contrasts.append(contrast)

            # Fill according entry in the matrix and subtract baseline contrast
            matrix_direct[i, j] = contrast - contrast_floor

    # Transform saved lists to arrays
    all_psfs = np.array(all_psfs)
    all_contrasts = np.array(all_contrasts)

    # Filling the off-axis elements
    matrix_two_N = np.copy(
        matrix_direct
    )  # This is just an intermediary copy so that I don't mix things up.
    matrix_pastis = np.copy(
        matrix_direct)  # This will be the final PASTIS matrix.

    for i in range(nb_seg):
        for j in range(nb_seg):
            if i != j:
                matrix_off_val = (matrix_two_N[i, j] - matrix_two_N[i, i] -
                                  matrix_two_N[j, j]) / 2.
                matrix_pastis[i, j] = matrix_off_val
                print('Off-axis for i{}-j{}: {}'.format(
                    i + 1, j + 1, matrix_off_val))

    # Normalize the matrix for the input aberration. The rest of the code assumes a baseline
    # aberration of 1 nm (the value is only converted to meters for HCIPy's sake), so this division
    # can be skipped as long as nm_aber corresponds to exactly 1 nm.
    #matrix_pastis /= np.square(nm_aber)

    # Save matrix to file
    filename_matrix = 'PASTISmatrix_num_' + zern_mode.name + '_' + zern_mode.convention + str(
        zern_mode.index)
    hc.write_fits(matrix_pastis, os.path.join(resDir,
                                              filename_matrix + '.fits'))
    print('Matrix saved to:', os.path.join(resDir, filename_matrix + '.fits'))

    # Save the PSF image *cube* as well (as opposed to each one individually)
    hc.write_fits(
        all_psfs,
        os.path.join(resDir, 'psfs', 'psf_cube' + '.fits'),
    )
    np.savetxt(os.path.join(resDir, 'contrasts.txt'), all_contrasts, fmt='%e')

    # Tell us how long it took to finish.
    end_time = time.time()
    print('Runtime for matrix_building.py:', end_time - start_time, 'sec =',
          (end_time - start_time) / 60, 'min')
    print('Data saved to {}'.format(resDir))
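

# Hedged usage sketch: build the LUVOIR matrix for one of the three apodizer designs, e.g.
#     num_matrix_luvoir(design='small')   # 'small', 'medium' or 'large'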
Example #9


if __name__ == '__main__':

    "Testing the uncalibrated analytical model\n"

    ### Define the aberration coefficients "coef"
    telescope = CONFIG_INI.get('telescope', 'name')
    nb_seg = CONFIG_INI.getint(telescope, 'nb_subapertures')
    zern_max = CONFIG_INI.getint('zernikes', 'max_zern')

    nm_aber = CONFIG_INI.getfloat(
        'calibration',
        'single_aberration') * u.nm  # [nm] amplitude of aberration
    zern_number = CONFIG_INI.getint(
        'calibration',
        'zernike')  # Which (Noll) Zernike we are calibrating for
    wss_zern_nb = util.noll_to_wss(
        zern_number)  # Convert from Noll to WSS framework

    ### Which segment are we aberrating? ###
    i = 0  # segment 1 --> i=0, seg 2 --> i=1, etc.
    cali = False  # calibrated or not?
    ### ------------------------------- ###
Example #10
def seg_mirror_test():
    """
    Testing the integrated energy of images produced by HCIPy vs Poppy segmented DMs.

    This is now deprecated, as I am using the hcipy SM directly, though specifically from an older commit:
    from hcipy.optics.segmented_mirror import SegmentedMirror
    """

    # Parameters
    which_tel = CONFIG_INI.get('telescope', 'name')
    NPIX = CONFIG_INI.getint('numerical', 'tel_size_px')
    PUP_DIAMETER = CONFIG_INI.getfloat(which_tel, 'diameter')
    GAPSIZE = CONFIG_INI.getfloat(which_tel, 'gaps')
    FLATTOFLAT = CONFIG_INI.getfloat(which_tel, 'flat_to_flat')

    wvln = 638e-9
    lamD = 20
    samp = 4
    norm = False

    fac = 6.55

    # --------------------------------- #
    #aber_rad = 6.2
    aber_array = np.linspace(0, 2*np.pi, 50, True)
    print('Aber in rad: \n{}'.format(aber_array))
    print('Aber in m: \n{}'.format(util.aber_to_opd(aber_array, wvln)))
    # --------------------------------- #
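    # Note: util.aber_to_opd() is assumed to convert a phase aberration in radians into an OPD in meters,
    # i.e. roughly opd = phase_rad / (2 * np.pi) * wvln; this is an assumption about the helper, noted here
    # for readability.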

    ### HCIPy SM

    # HCIPy grids and propagator
    pupil_grid = hcipy.make_pupil_grid(dims=NPIX, diameter=PUP_DIAMETER)
    focal_grid = hcipy.make_focal_grid(pupil_grid, samp, lamD, wavelength=wvln)
    prop = hcipy.FraunhoferPropagator(pupil_grid, focal_grid)

    # Generate an aperture
    aper, seg_pos = get_atlast_aperture(normalized=norm)
    aper = hcipy.evaluate_supersampled(aper, pupil_grid, 1)

    # Instantiate the segmented mirror
    hsm = SegmentedMirror(aper, seg_pos)

    # Make a pupil plane wavefront from aperture
    wf = hcipy.Wavefront(aper, wavelength=wvln)

    ### Poppy SM

    psm = poppy.dms.HexSegmentedDeformableMirror(name='Poppy SM',
                                                 rings=3,
                                                 flattoflat=FLATTOFLAT * u.m,
                                                 gap=GAPSIZE * u.m,
                                                 center=False)

    ### Apply pistons
    hc_ims = []
    pop_ims = []
    for aber_rad in aber_array:

        # Flatten both SMs
        hsm.flatten()
        psm.flatten()

        # HCIPy
        for i in [19, 28]:
            hsm.set_segment(i, util.aber_to_opd(aber_rad, wvln)/2, 0, 0)

        # Poppy
        for i in [34, 25]:
            psm.set_actuator(i, util.aber_to_opd(aber_rad, wvln) * u.m, 0, 0)  # 34 in poppy is 19 in HCIPy

        ### Propagate to image plane
        ### HCIPy
        # Apply SM to pupil plane wf
        wf_fp_pistoned = hsm(wf)

        # Propagate from SM to image plane
        im_pistoned_hc = prop(wf_fp_pistoned)

        ### Poppy
        # Make an optical system with the Poppy SM and a detector
        osys = poppy.OpticalSystem()
        osys.add_pupil(psm)
        pxscle = 0.0031 * fac  # I'm tweaking pixelscale and fov_arcsec to match the HCIPy image
        fovarc = 0.05 * fac
        osys.add_detector(pixelscale=pxscle, fov_arcsec=fovarc, oversample=10)

        # Calculate the PSF
        psf = osys.calc_psf(wvln)

        # Get the PSF as an array
        im_pistoned_pop = psf[0].data

        hc_ims.append(im_pistoned_hc.intensity.shaped/np.max(im_pistoned_hc.intensity))
        pop_ims.append(im_pistoned_pop/np.max(im_pistoned_pop))

    ### Trying to do it with numbers
    hc_ims = np.array(hc_ims)
    pop_ims = np.array(pop_ims)

    sum_hc = np.sum(hc_ims, axis=(1,2))
    sum_pop = np.sum(pop_ims, axis=(1,2)) - 1.75   # the -1.75 is just there because I didn't bother about image normalization too much

    plt.suptitle('Image degradation of SMs')
    plt.plot(aber_array, sum_hc, label='HCIPy SM')
    plt.plot(aber_array, sum_pop, label='Poppy SM')
    plt.xlabel('rad')
    plt.ylabel('image sum')
    plt.legend()
    plt.show()
Example #11
This is a module containing functions to generate the ATLAST pupil and simple coronagraphs from HCIPy.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import hcipy
import poppy

from config import CONFIG_INI
import util_pastis as util


# Configfile imports
which_tel = CONFIG_INI.get('telescope', 'name')
pupil_size = CONFIG_INI.getint('numerical', 'tel_size_px')
PUP_DIAMETER = CONFIG_INI.getfloat(which_tel, 'diameter')


def get_atlast_aperture(normalized=False, with_segment_gaps=True, segment_transmissions=1, write_to_disk=False, outDir=None):
    """Make the ATLAST/HiCAT pupil mask.

    This function is a copy of make_hicat_aperture(), except that it also returns the segment positions.

    Parameters
    ----------
    normalized : boolean
        If this is True, the outer diameter will be scaled to 1. Otherwise, the
        diameter of the pupil will be 15.0 meters.
    with_segment_gaps : boolean
        Include the gaps between individual segments in the aperture.
Example #12
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import astropy.units as u
import poppy
import webbpsf

from config import CONFIG_INI
import util_pastis as util

# Setting to ensure that PyCharm finds the webbpsf-data folder. If you don't know where it is, find it with:
# webbpsf.utils.get_webbpsf_data_path()
# --> e.g.: >>source activate astroconda   >>ipython   >>import webbpsf   >>webbpsf.utils.get_webbpsf_data_path()
os.environ['WEBBPSF_PATH'] = CONFIG_INI.get('local', 'webbpsf_data_path')

which_tel = CONFIG_INI.get('telescope', 'name')
nb_seg = CONFIG_INI.getint(which_tel, 'nb_subapertures')
flat_to_flat = CONFIG_INI.getfloat(which_tel, 'flat_to_flat')
wvl = CONFIG_INI.getfloat(which_tel, 'lambda') * u.nm
im_size_pupil = CONFIG_INI.getint('numerical', 'tel_size_px')
flat_diam = CONFIG_INI.getfloat(which_tel, 'flat_diameter') * u.m
wss_segs = webbpsf.constants.SEGNAMES_WSS_ORDER
im_size_e2e = CONFIG_INI.getint('numerical', 'im_size_px_webbpsf')
fpm = CONFIG_INI.get(which_tel, 'focal_plane_mask')  # focal plane mask
lyot_stop = CONFIG_INI.get(which_tel, 'pupil_plane_stop')  # Lyot stop
filter = CONFIG_INI.get(which_tel, 'filter_name')


def get_jwst_coords(outDir):

    #-# Generate the pupil with segments and spiders
Example #13
def make_aperture_nrp():

    # Keep track of time
    start_time = time.time()   # runtime currently is around 2 seconds for JWST, 9 minutes for ATLAST

    # Parameters
    telescope = CONFIG_INI.get('telescope', 'name').upper()
    localDir = os.path.join(CONFIG_INI.get('local', 'local_data_path'), 'active')
    outDir = os.path.join(localDir, 'segmentation')
    nb_seg = CONFIG_INI.getint(telescope, 'nb_subapertures')   # Number of apertures, without central obscuration
    flat_diam = CONFIG_INI.getfloat(telescope, 'diameter') * u.m
    im_size_pupil = CONFIG_INI.getint('numerical', 'tel_size_px')
    m_to_px = im_size_pupil/flat_diam      # for conversion from meters to pixels: 3 [m] = 3 * m_to_px [px]

    print('Running aperture generation for {}\n'.format(telescope))

    # If main subfolder "active" doesn't exist yet, create it.
    if not os.path.isdir(localDir):
        os.mkdir(localDir)

    # If subfolder "segmentation" doesn't exist yet, create it.
    if not os.path.isdir(outDir):
        os.mkdir(outDir)

    #-# Get the coordinates of the central pixel of each segment and save aperture to disk
    print('Getting segment centers')
    seg_position = np.zeros((nb_seg, 2))

    if telescope == 'JWST':
        from e2e_simulators import webbpsf_imaging as webbim
        seg_position = webbim.get_jwst_coords(outDir)

    elif telescope == 'ATLAST':
        from e2e_simulators import atlast_imaging as atim
        _aper, seg_coords = atim.get_atlast_aperture(normalized=False, write_to_disk=True, outDir=outDir)

        seg_position[:,0] = seg_coords.x
        seg_position[:,1] = seg_coords.y

    # Save the segment center positions just in case we want to check them without running the code
    np.savetxt(os.path.join(outDir, 'seg_position.txt'), seg_position, fmt='%2.2f')
    # 18 segments, central segment (0) not included

    #-# Make distance list with distances between all of the segment centers among each other - in meters
    vec_list = np.zeros((nb_seg, nb_seg, 2))
    for i in range(nb_seg):
        for j in range(nb_seg):
            vec_list[i,j,:] = seg_position[i,:] - seg_position[j,:]
    vec_list *= u.m
    # Save to disk; x and y coordinates are saved separately because np.savetxt only handles 1D/2D arrays
    np.savetxt(os.path.join(outDir, 'vec_list_x.txt'), vec_list[:,:,0], fmt='%2.2f')   # x distance; units: meters
    np.savetxt(os.path.join(outDir, 'vec_list_y.txt'), vec_list[:,:,1], fmt='%2.2f')   # y distance; units: meters

    #-# Nulling redundant vectors = setting redundant vectors in vec_list equal to zero
    # This was tricky to figure out, so the implementation follows the original IDL version exactly.
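    # Illustration of the criterion used below: two baselines are redundant if they have the same
    # length AND are (anti)parallel (zero cross product). For example,
    #   a = [1, 0], b = [1, 0]:  | |a|-|b| | = 0 and |a x b| = 0  ->  redundant, b gets nulled
    #   a = [1, 0], c = [0, 1]:  | |a|-|c| | = 0 but |a x c| = 1  ->  not redundant, both are kept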

    # Reshape vec_list array to one dimension so that we can implement the loop below
    longshape = vec_list.shape[0] * vec_list.shape[1]
    vec_flat = np.reshape(vec_list, (longshape, 2))
    # Save for testing
    np.savetxt(os.path.join(outDir, 'vec_flat.txt'), vec_flat)

    # Create array that will hold the nulled coordinates
    vec_null = np.copy(vec_flat)

    ap = 0
    rp = 0

    print('Nulling redundant segment pairs')
    for i in range(longshape):
        for j in range(i):   # Since range(i) is empty for i=0, the first comparison happens at i=1 & j=0.
                             # This loop setup guarantees i != j, so a vector is never compared with itself.
                             # Entries for a segment paired with itself (not a valid baseline) are already
                             # 0 in vec_flat and therefore stay 0 in vec_null.

            # Some print statements for testing
            #print('i, j', i, j)
            #print('vec_flat[i,:]: ', vec_flat[i,:])
            #print('vec_flat[j,:]: ', vec_flat[j,:])
            #print('norm diff: ', np.abs(np.linalg.norm(vec_flat[i,:]) - np.linalg.norm(vec_flat[j,:])))
            #print('dir diff: ', np.linalg.norm(np.cross(vec_flat[i,:], vec_flat[j,:])))
            ap += 1

            # Check if length of two vectors is the same (within numerical limits)
            if np.abs(np.linalg.norm(vec_flat[i,:]) - np.linalg.norm(vec_flat[j,:])) <= 1.e-10:

                # Check if direction of two vectors is the same (within numerical limits)
                if np.linalg.norm(np.cross(vec_flat[i,:], vec_flat[j,:])) <= 1.e-10:

                    # Some print statements for testing
                    #print('i, j', i, j)
                    #print('vec_flat[i,:]: ', vec_flat[i, :])
                    #print('vec_flat[j,:]: ', vec_flat[j, :])
                    #print('norm diff: ', np.abs(np.linalg.norm(vec_flat[i, :]) - np.linalg.norm(vec_flat[j, :])))
                    #print('dir diff: ', np.linalg.norm(np.cross(vec_flat[i, :], vec_flat[j, :])))
                    rp += 1

                    vec_null[i,:] = [0, 0]

    # Reshape nulled array back into proper shape of vec_list
    vec_list_nulled = np.reshape(vec_null, (vec_list.shape[0], vec_list.shape[1], 2))
    # Save for testing
    np.savetxt(os.path.join(outDir, 'vec_list_nulled_x.txt'), vec_list_nulled[:, :, 0], fmt='%2.2f')
    np.savetxt(os.path.join(outDir, 'vec_list_nulled_y.txt'), vec_list_nulled[:, :, 1], fmt='%2.2f')

    #-# Extract the (number of) non redundant vectors: NR_distance_list

    # Create vector that holds distances between segments (instead of distance COORDINATES like in vec_list)
    distance_list = np.square(vec_list_nulled[:,:,0]) + np.square(vec_list_nulled[:,:,1])   # squared distances: strictly positive for every surviving baseline, exactly zero for nulled ones
    nonzero = np.nonzero(distance_list)             # get indices of non-redundant segment pairs
    NR_distance_list = distance_list[nonzero]       # extract the list of distances between segments of NR pairs
    NR_pairs_nb = np.count_nonzero(distance_list)   # Counting how many non-redundant (NR) pairs we have
    # Save for testing
    np.savetxt(os.path.join(outDir, 'NR_distance_list.txt'), NR_distance_list, fmt='%2.2f')
    print('Number of non-redundant pairs: ' + str(NR_pairs_nb))

    #-# Select non redundant vectors
    # NR_pairs_list holds one row per non-redundant pair (NRP) with the two segment numbers [seg1, seg2];
    # the NRP number is the row index + 1.
    # NRPs are numbered from 1 to NR_pairs_nb, but Python indexing starts at 0!
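    # Example with hypothetical numbers: if nonzero = ([1, 2], [0, 0]), then
    #   NR_pairs_list = [[2, 1],
    #                    [3, 1]]
    # i.e. NRP 1 is formed by segments 2 and 1, NRP 2 by segments 3 and 1.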

    # Create the array of NRPs that will be the output
    NR_pairs_list = np.zeros((NR_pairs_nb, 2))   # NRP are numbered from 1 to NR_pairs_nb, as are the segments!

    # Loop over number of NRPs
    for i in range(NR_pairs_nb):
        # Since 'nonzero' holds the indices of segments, and Python indices start at 0, we have to add 1 to all the
        # 'segment names' in the array that tells us which NRP they form.
        NR_pairs_list[i,0] = nonzero[0][i] + 1
        NR_pairs_list[i,1] = nonzero[1][i] + 1
        # Again, NRP are numbered from 1 to NR_pairs_nb, and the segments are too!

    NR_pairs_list = NR_pairs_list.astype(int)
    # Save for testing
    np.savetxt(os.path.join(outDir, 'NR_pairs_list.txt'), NR_pairs_list, fmt='%i')

    #-# Generate projection matrix

    # Set the diagonal to zero (the distance between a segment and itself is always zero).
    # The diagonal entries of vec_list are already zero by construction, but we set them explicitly here anyway.
    vec_list2 = np.copy(vec_list)
    for i in range(nb_seg):
        for j in range(nb_seg):
            if i == j:
                vec_list2[i,j,:] = [0,0]

    # Save for testing
    np.savetxt(os.path.join(outDir, 'vec_list2_x.txt'), vec_list2[:, :, 0], fmt='%2.2f')
    np.savetxt(os.path.join(outDir, 'vec_list2_y.txt'), vec_list2[:, :, 1], fmt='%2.2f')

    # Initialize the projection matrix
    Projection_Matrix_int = np.zeros((nb_seg, nb_seg, 3))
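    # Projection_Matrix_int[i, j, 0] will record which NRP the baseline between segments i and j
    # belongs to (it stays 0 where no NRP matches, e.g. on the diagonal); the remaining two entries
    # record the segment pair that defines that NRP.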

    # Reshape arrays so that we can loop over them easier
    vec2_long = vec_list2.shape[0] * vec_list2.shape[1]
    vec2_flat = np.reshape(vec_list2, (vec2_long, 2))

    matrix_long = Projection_Matrix_int.shape[0] * Projection_Matrix_int.shape[1]
    matrix_flat = np.reshape(Projection_Matrix_int, (matrix_long, 3))

    print('Creating projection matrix')
    for i in range(np.square(nb_seg)):
        # Compare segment pair in i against all available NRPs.
        # Where it matches, record the NRP number in the matrix entry that corresponds to segments in i.

        for k in range(NR_pairs_nb):

            # Since the segment names (numbers) in NR_pairs_list assume we start numbering the segments at 1, we have to
            # subtract 1 every time when we need to convert a segment number into an index.
            # This means we write NR_pairs_list[k,0]-1 and NR_pairs_list[k,1]-1 .

            # Figure out which NRP a segment distance vector corresponds to - first by length.
            if np.abs(np.linalg.norm(vec2_flat[i, :]) - np.linalg.norm(vec_list[NR_pairs_list[k,0]-1, NR_pairs_list[k,1]-1, :])) <= 1.e-10:

                # Figure out which NRP a segment distance vector corresponds to - now by direction.
                if np.linalg.norm(np.cross(vec2_flat[i, :], vec_list[NR_pairs_list[k,0]-1, NR_pairs_list[k,1]-1, :])) <= 1.e-10:

                    matrix_flat[i, 0] = k + 1                       # Again: NRP start their numbering at 1
                    matrix_flat[i, 1] = NR_pairs_list[k,1] + 1      # and segments start their numbering at 1 too
                    matrix_flat[i, 2] = NR_pairs_list[k,0] + 1      # (see pupil image!).

    # Reshape matrix back to normal form
    Projection_Matrix = np.reshape(matrix_flat, (Projection_Matrix_int.shape[0], Projection_Matrix_int.shape[1], 3))

    # Convert the segment positions in vec_list from meters to pixels
    vec_list_px = vec_list * m_to_px

    #-# Save the arrays: vec_list, NR_pairs_list, Projection_Matrix
    util.write_fits(vec_list_px.value, os.path.join(outDir, 'vec_list.fits'), header=None, metadata=None)
    util.write_fits(NR_pairs_list, os.path.join(outDir, 'NR_pairs_list_int.fits'), header=None, metadata=None)
    util.write_fits(Projection_Matrix, os.path.join(outDir, 'Projection_Matrix.fits'), header=None, metadata=None)

    print('All outputs saved to {}'.format(outDir))

    # Tell us how long it took to finish.
    end_time = time.time()
    print('Runtime for aperture_definition.py:', end_time - start_time, 'sec =', (end_time - start_time)/60, 'min')
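A minimal usage sketch, assuming the module-level imports (os, time, numpy, astropy.units, CONFIG_INI) and the config file are in place:

if __name__ == '__main__':
    make_aperture_nrp()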