def preproc_files():
    if os.path.exists(data_dir + "HR8799_" + instrument + "_" + str(channel) + '_reduced.fits'):
        return
    if "sphere" in instrument.lower():
        science_name = "frames_removed.fits"
        psf_name = "psf_satellites_calibrated.fits"

        hdul = fits.open(data_dir + science_name)
        cube = hdul[0].data
        for channel, frame in enumerate(cube):
            hdu = fits.PrimaryHDU(frame)
            hdul_new = fits.HDUList([hdu])
            hdul_new.writeto(data_dir + "HR8799_" + instrument + "_" + str(channel) + '_reduced.fits', overwrite=True)
        hdul.close()
        hdul = fits.open(data_dir + psf_name)
        cube = hdul[0].data
        for channel, frame in enumerate(cube):
            hdu = fits.PrimaryHDU(frame)
            hdul_new = fits.HDUList([hdu])
            hdul_new.writeto(data_dir + "HR8799_" + instrument + "_" + str(channel) + '_PSF.fits', overwrite=True)
        hdul.close()
    elif "gpi" in instrument.lower():
        science_name = "*distorcorr.fits"
        psf_name = "*-original_PSF_cube.fits"
        # fits.open does not expand wildcards, so resolve the glob pattern first
        psfs = fits.open(glob.glob(data_dir + psf_name)[0])[0].data

        filelist = glob.glob(data_dir + science_name)
        dataset = GPI.GPIData(filelist, highpass=False, PSF_cube=psfs)
        science = dataset.input
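
# A minimal usage sketch for preproc_files(): the function reads module-level
# globals, so everything below (path, instrument name, channel) is an
# assumption for illustration, not part of the original example.
import os, glob
from astropy.io import fits
import pyklip.instruments.GPI as GPI

data_dir = "/path/to/HR8799/"  # hypothetical data directory
instrument = "sphere"          # or "gpi"
channel = 0                    # channel index used in the existence check
preproc_files()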
Example #2
def test_gpi_dataset():
    """
    Tests the GPI data interface, mostly on some edge cases since the general case is tested in test_parallelized_klip
    """
    # this shouldn't crash
    dataset = GPI.GPIData()

    # empty filelist should raise an error
    error_raised = False
    filelist = []
    try:
        dataset = GPI.GPIData(filelist)
    except ValueError:
        error_raised = True

    assert error_raised
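
# The same error check written with pytest's idiomatic context manager
# (a sketch; assumes pytest is available, as the surrounding test suite suggests):
def test_gpi_dataset_pytest_raises():
    import pytest
    with pytest.raises(ValueError):
        GPI.GPIData([])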
Example #3
def init_gpi():
    # GPI
    # Original files 131117,131118,160919
    # PynPoint structure GPIH, GPIK1, GPIK2
    # fits.open does not expand wildcards, so resolve the glob pattern first
    psf = fits.open(glob.glob(data_dir + "*original_PSF_cube.fits")[0])[0].data
    os.makedirs(data_dir + "pyklip", exist_ok=True)

    filelist = glob.glob(data_dir + "*distorcorr.fits")
    dataset = GPI.GPIData(filelist, highpass=False, PSF_cube=psf)
    return dataset
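
# Usage sketch (assumes data_dir is a module-level global pointing at a
# directory with the GPI *distorcorr.fits cubes and the sat-spot PSF cube):
dataset = init_gpi()
print(dataset.input.shape, dataset.wvs.shape)
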
def init():
    if "sphere" in instrument.lower():
        science_name = "frames_removed.fits"
        parang_name = "parang_removed.fits"
        psf_name = "psf_satellites_calibrated.fits"
        wlen_name = "wvs_micron.fits"

        # Science Data
        hdul = fits.open(data_dir + science_name)
        science = hdul[0].data
        hdul.close()
        # Parangs
        ang_hdul = fits.open(data_dir + parang_name)
        angles = ang_hdul[0].data
        ang_hdul.close()
        # Wavelength
        wvs_hdul = fits.open(data_dir + wlen_name)
        wlen = wvs_hdul[0].data
        wvs_hdul.close()
        # PSF Data
        psf_hdul = fits.open(data_dir + psf_name)
        psfs = psf_hdul[0].data
        psf_hdul.close()
    elif "gpi" in instrument.lower():
        science_name = "*distorcorr.fits"
        psf_name = "*-original_PSF_cube.fits""
        psfs = fits.open(data_dir + psf_name)[0].data

        filelist = glob.glob(data_dir +science_name)
        dataset = GPI.GPIData(filelist, highpass=False, PSF_cube = psf)

        ###### Useful values based on dataset ######
        N_frames = len(dataset.input)
        N_cubes = np.size(np.unique(dataset.filenums))
        nl = N_frames // N_cubes  # number of wavelength channels per exposure cube

        wlen = dataset.wvs[:nl]
        science = dataset.input
        angles = dataset.PAs


    global fwhm
    fwhm_fit = vip.var.fit_2dgaussian(psfs[0,0], crop=True, cropsize=11, debug=False)
    fwhm = np.mean(np.array([fwhm_fit['fwhm_y'],fwhm_fit['fwhm_x']])) # fit for fwhm

    return science,angles,wlen,psfs
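
# Typical call pattern for init() (a sketch; instrument and data_dir are
# assumed module-level globals, and fwhm is set globally as a side effect):
science, angles, wlen, psfs = init()
print("science: {0}, FWHM: {1:.2f} px".format(science.shape, fwhm))
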
Example #5
def test_mock_SDI(mock_klip_parallelized):
    """
    Tests SDI reduction with mocked data. 

    Args:
        mock_klip_parallelized: mock patch object. 
    """

    # create a mocked return value for klip_parallelized that returns a 4D array of size (b, N, y, x) of zeros
    mock_klip_parallelized.return_value = (np.zeros(
        (4, 111, 281, 281)), np.array([140, 140]))

    # time it
    t1 = time()

    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist)

    # run klip parallelized in SDI mode
    outputdir = testdir
    prefix = "mock"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=True,
                              mode="SDI")

    mocked_glob = glob.glob(testdir + 'mock*')
    assert (len(mocked_glob) == 5)

    print("{0} seconds to run".format(time() - t1))
def roc_dataset(inputDir, dir_fakes, mvt, reduc_spectrum, fakes_spectrum):

    numbasis = [5]
    maxnumbasis = 10
    # contrast of the fake planets
    contrast = 4.0 * (10**-6)
    # Less important parameters
    mute = False  # Mute print statements
    resolution = 3.5  # FWHM of the PSF used for the small sample statistic correction
    mask_radius = 5  # (Pixels) Radius of the disk used to mask out objects in an image
    overwrite = False  # Force rewriting the files even if they already exist

    ###########################################################################################
    ## Generate PSF cube
    ###########################################################################################
    from pyklip.instruments import GPI

    # Generate PSF cube for GPI from the satellite spots
    filenames = glob(os.path.join(inputDir, "S*distorcorr.fits"))
    dataset = GPI.GPIData(filenames, highpass=True)
    dataset.generate_psf_cube(20, same_wv_only=True)
    PSF_cube = inputDir + os.path.sep + "beta_Pic_test" + "-original_PSF_cube.fits"
    # Save the original PSF calculated from combining the sat spots
    dataset.savedata(PSF_cube, dataset.psfs, filetype="PSF Spec Cube")

    ###########################################################################################
    ## Reduce the dataset with FMMF
    ###########################################################################################
    # This section will take forever due to the FMMF reduction
    print("~~ FMMF and SNR ~~")

    # Define the function to read the dataset files.
    # To show a more advanced feature, the satellite spot fluxes are
    # re-estimated here using the PSF cube calculated previously.
    raw_read_func = lambda file_list: GPI.GPIData(file_list,
                                                  meas_satspot_flux=True,
                                                  numthreads=None,
                                                  highpass=True,
                                                  PSF_cube="*-original_PSF_cube.fits")
    filename = "S*distorcorr.fits"
    # Define the function to read the PSF cube file.
    # You can choose to directly give an array as the PSF cube and not bother with this.
    PSF_read_func = lambda file_list: GPI.GPIData(file_list, highpass=False)
    FMMFObj = FMMF(raw_read_func,
                   filename=filename,
                   spectrum=reduc_spectrum,
                   PSF_read_func=PSF_read_func,
                   PSF_cube=PSF_cube,
                   PSF_cube_wvs=None,
                   predefined_sectors="1.0 as",
                   label="FMMF",
                   overwrite=overwrite,
                   numbasis=numbasis,
                   maxnumbasis=maxnumbasis,
                   mvt=mvt)

    err_list = kpop_wrapper(inputDir, [FMMFObj],
                            spectrum_list=[reduc_spectrum],
                            mute_error=False)

    read_func = lambda file_list: GPI.GPIData(
        file_list, recalc_centers=False, recalc_wvs=False, highpass=False)

    # Definition of the SNR object
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}.fits".format(mvt, numbasis[0]))
    FMMF_snr_obj = Stat(read_func,
                        filename,
                        type="pixel based SNR",
                        overwrite=overwrite,
                        mute=mute,
                        resolution=resolution,
                        mask_radius=mask_radius,
                        pix2as=GPI.GPIData.lenslet_scale)
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}-SNRPerPixDr2.fits".format(mvt, numbasis[0]))
    FMMF_detec_obj = Detection(read_func,
                               filename,
                               mask_radius=None,
                               threshold=2,
                               overwrite=overwrite,
                               mute=mute,
                               IWA=9,
                               OWA=1. / 0.01414,
                               pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(inputDir, [FMMF_snr_obj, FMMF_detec_obj],
                            mute_error=False)

    ###########################################################################################
    ## Add Cross correlation and Matched Filter reductions
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}.fits".format(mvt, numbasis[0]))
    cc_obj = CrossCorr(read_func,
                       filename,
                       kernel_type="gaussian",
                       kernel_para=1.0,
                       collapse=True,
                       spectrum=reduc_spectrum,
                       folderName=reduc_spectrum,
                       overwrite=overwrite)
    mf_obj = Matchedfilter(read_func,
                           filename,
                           kernel_type="gaussian",
                           kernel_para=1.0,
                           label="pyklip",
                           spectrum=reduc_spectrum,
                           folderName=reduc_spectrum,
                           overwrite=overwrite)

    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian.fits".format(mvt, numbasis[0]))
    klip_snr_obj = Stat(read_func,
                        filename,
                        type="pixel based SNR",
                        overwrite=overwrite,
                        mute=mute,
                        resolution=resolution,
                        mask_radius=mask_radius,
                        pix2as=GPI.GPIData.lenslet_scale)
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian-SNRPerPixDr2.fits".format(
            mvt, numbasis[0]))
    klip_detec_obj = Detection(read_func,
                               filename,
                               mask_radius=None,
                               threshold=2,
                               overwrite=overwrite,
                               mute=mute,
                               IWA=9,
                               OWA=1. / 0.01414,
                               pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(inputDir,
                            [cc_obj, mf_obj, klip_snr_obj, klip_detec_obj],
                            mute_error=False)

    ###########################################################################################
    ## Inject fake planet for the fakes
    ###########################################################################################
    # Fixed contrast fakes
    # Load the PSF cube that has been calculated from the sat spots
    PSF_cube = glob(os.path.join(inputDir, "*-original_PSF_cube.fits"))[0]
    PSF_cube_obj = PSF_read_func([PSF_cube])
    PSF_cube_arr = PSF_cube_obj.input
    PSF_cube_wvs = PSF_cube_obj.wvs

    if not os.path.exists(dir_fakes):
        os.makedirs(dir_fakes)
    shutil.copyfile(PSF_cube,
                    os.path.join(dir_fakes, os.path.basename(PSF_cube)))

    fake_flux_dict = dict(mode="contrast", contrast=contrast)
    fake_position_dict = dict(mode="spirals")
    spdc_glob = glob(inputDir + os.path.sep + "S*_spdc_distorcorr.fits")

    dataset = GPI.GPIData(spdc_glob,
                          highpass=True,
                          meas_satspot_flux=True,
                          numthreads=mp.cpu_count(),
                          PSF_cube=PSF_cube_arr)

    dataset, extra_keywords = fakes.generate_dataset_with_fakes(
        dataset,
        fake_position_dict,
        fake_flux_dict,
        spectrum=fakes_spectrum,
        PSF_cube=PSF_cube_arr,
        PSF_cube_wvs=PSF_cube_wvs,
        mute=mute)

    numwaves = np.size(np.unique(dataset.wvs))
    N_cubes = np.size(dataset.wvs) // numwaves  # integer division so range() below works in Python 3
    suffix = fakes_spectrum + "_ROC"
    # Save each cube with the fakes
    for cube_id in range(N_cubes):
        spdc_filename = dataset.filenames[(cube_id * numwaves)].split(
            os.path.sep)[-1].split(".")[0]
        print("Saving file: " + dir_fakes + os.path.sep + spdc_filename + "_" +
              suffix + ".fits")
        dataset.savedata(dir_fakes + os.path.sep + spdc_filename + "_" +
                         suffix + ".fits",
                         dataset.input[(cube_id * numwaves):((cube_id + 1) *
                                                             numwaves), :, :],
                         filetype="raw spectral cube with fakes",
                         more_keywords=extra_keywords,
                         user_prihdr=dataset.prihdrs[cube_id],
                         user_exthdr=dataset.exthdrs[cube_id])

    ###########################################################################################
    ## Reduce dataset with fakes
    ###########################################################################################
    # Object to reduce the fake dataset with FMMF
    raw_nohpf_read_func = lambda file_list: GPI.GPIData(
        file_list,
        meas_satspot_flux=True,
        numthreads=None,
        highpass=False,
        PSF_cube="*-original_PSF_cube.fits")
    filename = "S*_spdc_distorcorr_{0}_ROC.fits".format(fakes_spectrum)
    FMMFObj_fk = FMMF(raw_nohpf_read_func,
                      filename=filename,
                      spectrum=reduc_spectrum,
                      PSF_read_func=PSF_read_func,
                      PSF_cube=PSF_cube,
                      PSF_cube_wvs=None,
                      predefined_sectors="1.0 as",
                      label="FMMF_ROC",
                      overwrite=overwrite,
                      numbasis=numbasis,
                      maxnumbasis=maxnumbasis,
                      mvt=mvt,
                      fakes_only=True,
                      pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(dir_fakes, [FMMFObj_fk],
                            spectrum_list=[reduc_spectrum],
                            mute_error=False)

    read_func = lambda file_list: GPI.GPIData(
        file_list, recalc_centers=False, recalc_wvs=False, highpass=False)

    # Definition of the SNR object
    filename = os.path.join(
        "kpop_FMMF_ROC", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}.fits".format(mvt, numbasis[0]))
    filename_noPlanets = os.path.join(
        inputDir, "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}.fits".format(mvt, numbasis[0]))
    FMMF_snr_Obj_fk = Stat(read_func,
                           filename,
                           filename_noPlanets=filename_noPlanets,
                           type="pixel based SNR",
                           overwrite=overwrite,
                           mute=mute,
                           resolution=resolution,
                           mask_radius=mask_radius,
                           N_threads=-1,
                           pix2as=GPI.GPIData.lenslet_scale)
    filename_fits = os.path.join(
        "kpop_FMMF_ROC", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}-SNRPerPixDr2.fits".format(mvt, numbasis[0]))
    filename_csv = os.path.join(
        inputDir, "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}-SNRPerPixDr2-DetecTh2Mr4.csv".format(
            mvt, numbasis[0]))
    FMMF_ROC_Obj_fk = ROC(read_func,
                          filename,
                          filename_csv,
                          overwrite=overwrite,
                          mute=mute,
                          IWA=15,
                          OWA=70,
                          pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(dir_fakes, [FMMF_snr_Obj_fk, FMMF_ROC_Obj_fk],
                            mute_error=False)

    ###########################################################################################
    ## Add Cross correlation and Matched Filter reductions
    filename = os.path.join(
        "kpop_FMMF_ROC", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}.fits".format(mvt, numbasis[0]))
    cc_Obj_fk = CrossCorr(read_func,
                          filename,
                          kernel_type="gaussian",
                          kernel_para=1.0,
                          collapse=True,
                          spectrum=reduc_spectrum,
                          folderName=reduc_spectrum,
                          overwrite=overwrite)
    mf_Obj_fk = Matchedfilter(read_func,
                              filename,
                              kernel_type="gaussian",
                              kernel_para=1.0,
                              spectrum=reduc_spectrum,
                              folderName=reduc_spectrum,
                              overwrite=overwrite)

    filename = os.path.join(
        "kpop_FMMF_ROC", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian.fits".format(mvt, numbasis[0]))
    filename_noPlanets = os.path.join(
        inputDir, "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian.fits".format(mvt, numbasis[0]))
    klip_snr_Obj_fk = Stat(read_func,
                           filename,
                           filename_noPlanets=filename_noPlanets,
                           type="pixel based SNR",
                           overwrite=overwrite,
                           mute=mute,
                           resolution=resolution,
                           mask_radius=mask_radius,
                           pix2as=GPI.GPIData.lenslet_scale)
    filename_csv = os.path.join(
        inputDir, "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian-SNRPerPixDr2-DetecTh2Mr4.csv".
        format(mvt, numbasis[0]))
    filename_fits = os.path.join(
        "kpop_FMMF_ROC", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian-SNRPerPixDr2.fits".format(
            mvt, numbasis[0]))
    klip_ROC_Obj_fk = ROC(read_func,
                          filename_fits,
                          filename_csv,
                          overwrite=overwrite,
                          mute=mute,
                          IWA=15,
                          OWA=70,
                          pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(
        dir_fakes, [cc_Obj_fk, mf_Obj_fk, klip_snr_Obj_fk, klip_ROC_Obj_fk],
        mute_error=False)

    ###########################################################################################
    ## Remove the fake datasets because we don't need them anymore
    ###########################################################################################
    spdc_glob = glob(dir_fakes + os.path.sep +
                     "S*_spdc_distorcorr_{0}_ROC.fits".format(fakes_spectrum))
    for filename in spdc_glob:
        print("Removing {0}".format(filename))
        os.remove(filename)
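
# Hedged invocation sketch for roc_dataset(); every value below is a
# placeholder, not taken from the original example:
# roc_dataset("/path/to/epoch_dir/", "/path/to/fakes_dir/", mvt=0.7,
#             reduc_spectrum="t600g100nc", fakes_spectrum="t600g100nc")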
Example #7
def test_example_gpi_klip_dataset():
    """
    Tests standard pyklip.parallelized.klip_dataset() with GPI data from the tutorial. Uses no spectral template.

    """
    # time it
    t1 = time()

    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    # hopefully there are still 3 files
    assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist, highpass=True)

    # run klip parallelized
    outputdir = testdir
    prefix = "example-betapic-j-k100a9s4m1"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=True,
                              mode="ADI+SDI")

    # look at the output data. Validate the spectral cube
    spec_hdulist = fits.open("{out}/{pre}-KL20-speccube.fits".format(
        out=outputdir, pre=prefix))
    speccube_kl20 = spec_hdulist[1].data

    # check to make sure it's the right shape
    assert (speccube_kl20.shape == (37, 281, 281))

    # look at the output data. Validate the KL mode cube
    kl_hdulist = fits.open("{out}/{pre}-KLmodes-all.fits".format(out=outputdir,
                                                                 pre=prefix))
    klcube = kl_hdulist[1].data

    # check to make sure it's the right shape
    assert (klcube.shape == (4, 281, 281))

    # try to retrieve beta pic b.
    # True astrometry taken from Wang et al.(2016)
    true_sep = 426.6 / 1e3 / GPI.GPIData.lenslet_scale  # in pixels
    true_pa = 212.2  # degrees
    # guessing flux and FWHM
    true_flux = 1.7e-5
    true_fwhm = 2.3  # ~lambda/D for lambda=1.25 microns, D=8 m

    # find planet in collapsed cube
    collapsed_kl20 = klcube[1]
    flux_meas, x_meas, y_meas, fwhm_meas = fakes.retrieve_planet(
        collapsed_kl20,
        dataset.output_centers[0],
        dataset.output_wcs[0],
        true_sep,
        true_pa,
        searchrad=4,
        guesspeak=2.e-5,
        guessfwhm=2)
    print(flux_meas, x_meas, y_meas, fwhm_meas)

    # error thresholds
    # flux error
    assert np.abs((flux_meas - true_flux) / true_flux) < 0.4
    # positional error
    theta = fakes.convert_pa_to_image_polar(true_pa, dataset.output_wcs[0])
    true_x = true_sep * np.cos(np.radians(theta)) + dataset.output_centers[0,
                                                                           0]
    true_y = true_sep * np.sin(np.radians(theta)) + dataset.output_centers[0,
                                                                           1]
    assert np.abs(true_x - x_meas) < 0.4
    assert np.abs(true_y - y_meas) < 0.4
    # fwhm error
    assert np.abs(true_fwhm - fwhm_meas) < 0.4

    # measure SNR of planet

    print("{0} seconds to run".format(time() - t1))
Example #8
def test_adi_gpi_klip_dataset_with_fakes_twice(filelist=None):
    """
    Tests ADI reduction with fakes injected at certain position angles, and tests that we can run it twice and still be OK.

    Also tests lite mode.

    Args:
        filelist: if not None, supply files to test on. Otherwise use standard beta pic data
    """
    # time it
    t1 = time()

    # grab the files
    if filelist is None:
        filelist = glob.glob(testdir +
                             os.path.join("data", "S20131210*distorcorr.fits"))

        # hopefully there are still 3 files
        assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist,
                          skipslices=[0, 36],
                          bad_sat_spots=[3],
                          highpass=False)

    # save old centers for later
    oldcenters = np.copy(dataset.centers)

    dataset.generate_psfs(boxrad=25 // 2)
    assert np.any(dataset.psfs > 0)  # the extracted PSFs should contain positive values

    # inject fake planet
    fake_seps = [20, 50, 40, 30]  # pixels
    fake_pas = [-50, -165, 130, 10]  # degrees
    fake_contrasts = np.array([1.e-4, 3.e-5, 5.e-5, 1.e-4])  # bright planet
    fake_injected_fluxes = fake_contrasts * np.mean(dataset.dn_per_contrast)
    for fake_sep, fake_pa, fake_flux in zip(fake_seps, fake_pas,
                                            fake_injected_fluxes):
        fakes.inject_planet(dataset.input, dataset.centers, fake_flux,
                            dataset.wcs, fake_sep, fake_pa)

    # run klip parallelized
    outputdir = testdir
    prefix = "adionly-betapic-j-k100a9s4m1-fakes50pa50"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=False,
                              mode="ADI",
                              lite=True,
                              highpass=False)

    # before we do it again, check that dataset.centers remains unchanged
    assert (dataset.centers[0][0] == oldcenters[0][0])

    # And run it again to check that we can reuse the same dataset object
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=True,
                              mode="ADI",
                              lite=False,
                              highpass=True)

    # look at the output data. Validate the spectral cube
    spec_hdulist = fits.open("{out}/{pre}-KL20-speccube.fits".format(
        out=outputdir, pre=prefix))
    speccube_kl20 = spec_hdulist[1].data

    # check to make sure it's the right shape
    assert (speccube_kl20.shape == (35, 281, 281))

    # look at the output data. Validate the KL mode cube
    spec_hdulist = fits.open("{out}/{pre}-KLmodes-all.fits".format(
        out=outputdir, pre=prefix))
    klcube = spec_hdulist[1].data

    # check to make sure it's the right shape
    assert (klcube.shape == (4, 281, 281))

    # collapse data
    collapsed_kl20 = klcube[1]

    # try to retrieve fake planet
    for fake_sep, fake_pa, fake_contrast in zip(fake_seps, fake_pas,
                                                fake_contrasts):
        peakflux = fakes.retrieve_planet_flux(collapsed_kl20,
                                              dataset.output_centers[0],
                                              dataset.output_wcs[0],
                                              fake_sep,
                                              fake_pa,
                                              refinefit=True)

        assert (np.abs((peakflux / 0.7 - fake_contrast) / fake_contrast) < 0.5)

    print("{0} seconds to run".format(time() - t1))
Example #9
def contrast_dataset(inputDir, dir_fakes, mvt, reduc_spectrum, fakes_spectrum,
                     approx_throughput):
    ###########################################################################################
    ## Contrast curve parameters
    ###########################################################################################

    numbasis = [5]
    maxnumbasis = 10
    # Less important parameters
    mute = False  # Mute print statements
    mask_radius = 5  # (Pixels) Radius of the disk used to mask out objects in an image
    overwrite = False  # Force rewriting the files even if they already exist

    # contrast_range = [0.2,1.2] # Range of separation in arcsec for the contrast curve calculation
    pa_shift_list = [
        0, 180
    ]  # Position angle shift between the fakes in the different copies of the dataset

    ###########################################################################################
    ## Generate PSF cube
    ###########################################################################################

    # Generate PSF cube for GPI from the satellite spots
    filenames = glob(os.path.join(inputDir, "S*distorcorr.fits"))
    dataset = GPI.GPIData(filenames, highpass=True)
    dataset.generate_psf_cube(20, same_wv_only=True)
    PSF_cube = inputDir + os.path.sep + os.path.basename(
        filenames[0]).split(".fits")[0] + "-original_PSF_cube.fits"
    # Save the original PSF calculated from combining the sat spots
    dataset.savedata(PSF_cube,
                     dataset.psfs,
                     filetype="PSF Spec Cube",
                     pyklip_output=False)

    ###########################################################################################
    ## Reduce the dataset with FMMF
    ###########################################################################################
    # This section will take forever due to the FMMF reduction
    print("~~ FMMF and SNR ~~")

    # Define the function to read the dataset files.
    # To show a more advanced feature, the satellite spot fluxes are
    # re-estimated here using the PSF cube calculated previously.
    raw_read_func = lambda file_list: GPI.GPIData(file_list,
                                                  meas_satspot_flux=True,
                                                  numthreads=None,
                                                  highpass=True,
                                                  PSF_cube="*-original_PSF_cube.fits")
    filename = "S*distorcorr.fits"
    # Define the function to read the PSF cube file.
    # You can choose to directly give an array as the PSF cube and not bother with this.
    PSF_read_func = lambda file_list: GPI.GPIData(file_list, highpass=False)
    # Annuli boundaries: 5 px wide annuli out to ~30 px, then 10 px wide out to 140 px
    w_ann0 = 5
    w_ann1 = 10
    inner_edges = np.arange(8.7, 30, w_ann0).tolist()
    outer_edges = np.arange(inner_edges[-1] + w_ann1, 140, w_ann1).tolist()
    tmp = inner_edges + outer_edges
    annuli = [(rho1, rho2) for rho1, rho2 in zip(tmp[0:-1], tmp[1:])]
    FMMFObj = FMMF(raw_read_func,
                   filename=filename,
                   spectrum=reduc_spectrum,
                   PSF_read_func=PSF_read_func,
                   PSF_cube=PSF_cube,
                   PSF_cube_wvs=None,
                   subsections=1,
                   annuli=annuli,
                   label="FMMF",
                   overwrite=overwrite,
                   numbasis=numbasis,
                   maxnumbasis=maxnumbasis,
                   mvt=mvt)
    read_func = lambda file_list: GPI.GPIData(
        file_list, recalc_centers=False, recalc_wvs=False, highpass=False)

    # Definition of the SNR object
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}.fits".format(mvt, numbasis[0]))
    FMMF_snr_obj = Stat(read_func,
                        filename,
                        type="pixel based SNR",
                        overwrite=overwrite,
                        mute=mute,
                        mask_radius=mask_radius,
                        pix2as=GPI.GPIData.lenslet_scale)
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FM*-KL{1}-SNRPerPixDr2.fits".format(mvt, numbasis[0]))
    FMMF_detec_obj = Detection(read_func,
                               filename,
                               mask_radius=None,
                               threshold=2,
                               overwrite=overwrite,
                               mute=mute,
                               IWA=9,
                               OWA=1. / 0.01414,
                               pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(inputDir, [FMMFObj, FMMF_snr_obj, FMMF_detec_obj],
                            mute_error=False)

    ###########################################################################################
    ## Add Cross correlation and Matched Filter reductions
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}.fits".format(mvt, numbasis[0]))
    cc_obj = CrossCorr(read_func,
                       filename,
                       kernel_type="gaussian",
                       kernel_para=1.0,
                       collapse=True,
                       spectrum=reduc_spectrum,
                       folderName=reduc_spectrum,
                       overwrite=overwrite)
    mf_obj = Matchedfilter(read_func,
                           filename,
                           kernel_type="gaussian",
                           kernel_para=1.0,
                           label="pyklip",
                           spectrum=reduc_spectrum,
                           folderName=reduc_spectrum,
                           overwrite=overwrite)

    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian.fits".format(mvt, numbasis[0]))
    klip_snr_obj = Stat(read_func,
                        filename,
                        type="pixel based SNR",
                        overwrite=overwrite,
                        mute=mute,
                        mask_radius=mask_radius,
                        pix2as=GPI.GPIData.lenslet_scale)
    filename = os.path.join(
        "kpop_FMMF", reduc_spectrum,
        "*_{0:0.2f}-speccube-KL{1}-*gaussian-SNRPerPixDr2.fits".format(
            mvt, numbasis[0]))
    klip_detec_obj = Detection(read_func,
                               filename,
                               mask_radius=None,
                               threshold=2,
                               overwrite=overwrite,
                               mute=mute,
                               IWA=9,
                               OWA=1. / 0.01414,
                               pix2as=GPI.GPIData.lenslet_scale)

    err_list = kpop_wrapper(inputDir,
                            [cc_obj, mf_obj, klip_snr_obj, klip_detec_obj],
                            mute_error=False)

    ###########################################################################################
    ## Initial guess for the contrast curve (to determine the contrast of the fakes)
    ###########################################################################################
    # This section can be user defined as long as sep_bins_center and cont_stddev are set.

    FMCont_filename = os.path.join(
        inputDir, "kpop_FMMF", reduc_spectrum,
        "*{0:0.2f}-FMCont-KL{1}.fits".format(mvt, numbasis[0]))
    contrast_image_obj = read_func([glob(FMCont_filename)[0]])

    cont_stddev, sep_bins = get_image_stddev(
        np.squeeze(contrast_image_obj.input),
        centroid=contrast_image_obj.centers[0])
    # Separation samples in pixels
    sep_bins_center = (np.array([r_tuple[0] for r_tuple in sep_bins]))
    # Approximate contrast curve at these separations
    approx_cont_curve = 5 * np.array(cont_stddev) / approx_throughput

    ###########################################################################################
    ## Build fake datasets to be used to calibrate the conversion factor
    ###########################################################################################
    print("~~ Injecting fakes ~~")

    # Load the PSF cube that has been calculated from the sat spots
    PSF_cube = glob(os.path.join(inputDir, "*-original_PSF_cube.fits"))[0]
    PSF_cube_obj = PSF_read_func([PSF_cube])
    PSF_cube_arr = PSF_cube_obj.input
    PSF_cube_wvs = PSF_cube_obj.wvs

    if not os.path.exists(dir_fakes):
        os.makedirs(dir_fakes)
    shutil.copyfile(PSF_cube,
                    os.path.join(dir_fakes, os.path.basename(PSF_cube)))

    for pa_shift in pa_shift_list:
        # Define the fakes' positions and contrasts
        fake_flux_dict = dict(mode="SNR",
                              SNR=10,
                              sep_arr=sep_bins_center,
                              contrast_arr=approx_cont_curve)
        # fake_flux_dict = dict(mode = "contrast",contrast=5e-6)
        fake_position_dict = dict(mode="spirals", pa_shift=pa_shift, annuli=10)

        # Inject the fakes
        spdc_glob = glob(inputDir + os.path.sep + "S*_spdc_distorcorr.fits")
        if overwrite or len(
                glob(
                    os.path.join(dir_fakes, "S*_spdc_distorcorr_{0}_PA*.fits").
                    format(fakes_spectrum))
        ) != len(pa_shift_list) * len(spdc_glob):
            dataset = GPI.GPIData(spdc_glob,
                                  highpass=True,
                                  meas_satspot_flux=True,
                                  numthreads=mp.cpu_count(),
                                  PSF_cube=PSF_cube_arr)

            dataset, extra_keywords = fakes.generate_dataset_with_fakes(
                dataset,
                fake_position_dict,
                fake_flux_dict,
                spectrum=fakes_spectrum,
                PSF_cube=PSF_cube_arr,
                PSF_cube_wvs=PSF_cube_wvs,
                mute=mute)

            numwaves = np.size(np.unique(dataset.wvs))
            N_cubes = np.size(dataset.wvs) // numwaves  # integer division so range() below works in Python 3
            suffix = fakes_spectrum + "_PA{0:02d}".format(pa_shift)
            # Save each cube with the fakes
            for cube_id in range(N_cubes):
                spdc_filename = dataset.filenames[(cube_id * numwaves)].split(
                    os.path.sep)[-1].split(".")[0]
                print("Saving file: " + dir_fakes + os.path.sep +
                      spdc_filename + "_" + suffix + ".fits")
                dataset.savedata(dir_fakes + os.path.sep + spdc_filename +
                                 "_" + suffix + ".fits",
                                 dataset.input[(cube_id *
                                                numwaves):((cube_id + 1) *
                                                           numwaves), :, :],
                                 filetype="raw spectral cube with fakes",
                                 more_keywords=extra_keywords,
                                 user_prihdr=dataset.prihdrs[cube_id],
                                 user_exthdr=dataset.exthdrs[cube_id],
                                 pyklip_output=False)

    ###########################################################################################
    ## Reduce the fake dataset
    ###########################################################################################

    # Object to reduce the fake dataset with FMMF
    raw_nohpf_read_func = lambda file_list: GPI.GPIData(
        file_list,
        meas_satspot_flux=True,
        numthreads=None,
        highpass=False,
        PSF_cube="*-original_PSF_cube.fits")
    for pa_shift in pa_shift_list:
        # Object to reduce the fake dataset with FMMF
        filename = "S*_spdc_distorcorr_{0}_PA{1:02d}.fits".format(
            fakes_spectrum, pa_shift)
        FMMFObj = FMMF(raw_nohpf_read_func,
                       filename=filename,
                       spectrum=reduc_spectrum,
                       PSF_read_func=PSF_read_func,
                       PSF_cube=PSF_cube,
                       PSF_cube_wvs=None,
                       subsections=1,
                       annuli=annuli,
                       label="FMMF_PA{0:02d}".format(pa_shift),
                       overwrite=overwrite,
                       numbasis=numbasis,
                       maxnumbasis=maxnumbasis,
                       mvt=mvt,
                       fakes_only=True,
                       pix2as=GPI.GPIData.lenslet_scale)

        # Definition of the SNR object
        filename = os.path.join(
            "kpop_FMMF_PA{0:02d}".format(pa_shift), reduc_spectrum,
            "*{0:0.2f}-FM*-KL{1}.fits".format(mvt, numbasis[0]))
        filename_noPlanets = os.path.join(
            inputDir, "kpop_FMMF", reduc_spectrum,
            "*{0:0.2f}-FM*-KL{1}.fits".format(mvt, numbasis[0]))
        FMMF_snr_Obj_fk = Stat(read_func,
                               filename,
                               filename_noPlanets=filename_noPlanets,
                               type="pixel based SNR",
                               overwrite=overwrite,
                               mute=mute,
                               mask_radius=mask_radius,
                               N_threads=-1,
                               pix2as=GPI.GPIData.lenslet_scale)

        err_list = kpop_wrapper(dir_fakes, [FMMFObj, FMMF_snr_Obj_fk],
                                mute_error=False)

        ###########################################################################################
        ## Add Cross correlation and Matched Filter reductions
        filename = os.path.join(
            "kpop_FMMF_PA{0:02d}".format(pa_shift), reduc_spectrum,
            "*_{0:0.2f}-speccube-KL{1}.fits".format(mvt, numbasis[0]))
        cc_Obj_fk = CrossCorr(read_func,
                              filename,
                              kernel_type="gaussian",
                              kernel_para=1.0,
                              collapse=True,
                              spectrum=reduc_spectrum,
                              folderName=reduc_spectrum,
                              overwrite=overwrite)
        mf_Obj_fk = Matchedfilter(read_func,
                                  filename,
                                  kernel_type="gaussian",
                                  kernel_para=1.0,
                                  spectrum=reduc_spectrum,
                                  folderName=reduc_spectrum,
                                  overwrite=overwrite)

        filename = os.path.join(
            "kpop_FMMF_PA{0:02d}".format(pa_shift), reduc_spectrum,
            "*_{0:0.2f}-speccube-KL{1}-*gaussian.fits".format(
                mvt, numbasis[0]))
        filename_noPlanets = os.path.join(
            inputDir, "kpop_FMMF", reduc_spectrum,
            "*_{0:0.2f}-speccube-KL{1}-*gaussian.fits".format(
                mvt, numbasis[0]))
        klip_snr_Obj_fk = Stat(read_func,
                               filename,
                               filename_noPlanets=filename_noPlanets,
                               type="pixel based SNR",
                               overwrite=overwrite,
                               mute=mute,
                               mask_radius=mask_radius,
                               pix2as=GPI.GPIData.lenslet_scale)

        err_list = kpop_wrapper(dir_fakes,
                                [cc_Obj_fk, mf_Obj_fk, klip_snr_Obj_fk],
                                mute_error=False)

    ###########################################################################################
    ## Combine all the data to build the contrast curves
    ###########################################################################################

    # 2 lines to be removed
    read_func = lambda file_list: GPI.GPIData(
        file_list, recalc_centers=False, recalc_wvs=False, highpass=False)
    PSF_read_func = lambda file_list: GPI.GPIData(file_list, highpass=False)

    # # Extract Julian date of the file
    # tmp_file = os.path.join(dir_fakes,"kpop_FMMF_PA{0:02d}".format(pa_shift),reduc_spectrum,"*{0:0.2f}-FM*-KL{1}.fits".format(mvt,numbasis[0]))
    # hdulist = pyfits.open(glob(tmp_file)[0])
    # MJDOBS = hdulist[0].header['MJD-OBS']
    MJDOBS = None

    # For each of the metrics below, calculate the contrast curve using the fakes to calibrate the conversion factor.
    separation_list = []
    contrast_curve_list = []
    for FMMF_metric in [
            "FMMF-KL{0}".format(numbasis[0]), "FMCC-KL{0}".format(numbasis[0]),
            "FMCont-KL{0}".format(numbasis[0]),
            "speccube-KL{0}-crossCorrgaussian".format(numbasis[0]),
            "speccube-KL{0}-MF3Dgaussian".format(numbasis[0])
    ]:
        nofakes_filename = os.path.join(
            inputDir, "kpop_FMMF", reduc_spectrum,
            "*_{0:0.2f}-{1}.fits".format(mvt, FMMF_metric))
        fakes_filename_list = [
            os.path.join(
                dir_fakes, "kpop_FMMF_PA{0:02d}".format(pa_shift),
                reduc_spectrum,
                "*_{0:0.2f}-{1}.fits".format(mvt,
                                             FMMF_metric).format(pa_shift))
            for pa_shift in pa_shift_list
        ]
        fakes_SNR_filename_list = [
            os.path.join(
                dir_fakes, "kpop_FMMF_PA{0:02d}".format(pa_shift),
                reduc_spectrum, "*_{0:0.2f}-{1}-SNRPerPixDr2.fits".format(
                    mvt, FMMF_metric).format(pa_shift))
            for pa_shift in pa_shift_list
        ]
        separation, contrast_curve, throughput_tuple = calculate_contrast(
            read_func,
            nofakes_filename,
            fakes_filename_list,
            GPI.GPIData.lenslet_scale,
            mask_radius=mask_radius,
            Dr=2,
            save_dir=os.path.join(inputDir, "kpop_FMMF", reduc_spectrum),
            suffix=FMMF_metric + "-mvt{0:0.2f}".format(mvt),
            spec_type=fakes_spectrum,
            fakes_SNR_filename_list=fakes_SNR_filename_list,
            MJDOBS=MJDOBS)
        separation_list.append(separation)
        contrast_curve_list.append(contrast_curve)

    if 0:
        import matplotlib.pyplot as plt
        plt.figure(1)
        for separation, contrast_curve, FMMF_metric in zip(
                separation_list, contrast_curve_list, [
                    "FMMF-KL{0}".format(numbasis[0]), "FMCC-KL{0}".format(
                        numbasis[0]), "FMCont-KL{0}".format(numbasis[0]),
                    "speccube-KL{0}-crossCorrgaussian".format(numbasis[0]),
                    "speccube-KL{0}-MF3Dgaussian".format(numbasis[0])
                ]):
            plt.plot(separation, contrast_curve, label=FMMF_metric)
        plt.gca().set_yscale('log')
        plt.legend()
        plt.show()
    ###########################################################################################
    ## Remove the fake datasets because we don't need them anymore
    ###########################################################################################

    spdc_glob = glob(dir_fakes + os.path.sep +
                     "S*_spdc_distorcorr_{0}_*.fits".format(fakes_spectrum))
    for filename in spdc_glob:
        print("Removing {0}".format(filename))
        os.remove(filename)
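
# Hedged invocation sketch for contrast_dataset(); all values are placeholders:
# contrast_dataset("/path/to/epoch_dir/", "/path/to/fakes_dir/", mvt=0.7,
#                  reduc_spectrum="t600g100nc", fakes_spectrum="t600g100nc",
#                  approx_throughput=0.5)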
def get_planet_spectrum(spectrum, wavelength, ori_wvs=None):
    """
    Get the normalized spectrum of a planet for a GPI spectral band or any wavelengths array.
    Spectra are extracted from .flx files from Mark Marley et al.'s models.

    Args:
        spectrum: Path of the .flx file containing the spectrum (or an array of fluxes; see ori_wvs).
        wavelength: array of wavelengths in microns (or string with GPI band 'H', 'J', 'K1', 'K2', 'Y').
                (When using a GPI spectral band, wavelength samples are linearly spaced between the first and the last
                wavelength of the band.)
        ori_wvs: array of wavelengths for the input spectrum when spectrum is given as an array instead of a path.

    Return:
        wavelengths: the GPI sampling of the considered band in micrometers.
        spectrum: the spectrum of the planet for the given band or wavelength array, normalized to unit mean.
    """

    if isinstance(spectrum, str):
        spec_data = []
        with open(spectrum, 'r') as f:
            for line in f:
                splitted_line = line.split()
                # splitted_line[0]: index
                # splitted_line[1]: wavelength (mum)
                # splitted_line[2]: T_brt
                # splitted_line[3]: flux in units of erg cm-2 sec-1 Hz-1 at the top of the planet's atmosphere

                try:
                    spec_data.append([
                        float(splitted_line[0]),
                        float(splitted_line[1]),
                        float(splitted_line[2]),
                        float(splitted_line[3])
                    ])
                except (ValueError, IndexError):
                    # stop at the first line that no longer parses as data
                    break

        spec_data = np.array(spec_data)
        N_samp = spec_data.shape[0]
        wave = spec_data[:, 1]
        spec = spec_data[:, 3]

        # Convert F_nu to F_lambda
        spec = spec / wave**2
    else:
        wave = ori_wvs
        spec = spectrum

    # todo: check that it matches the actual sampling
    if isinstance(wavelength, str):
        import pyklip.instruments.GPI as GPI
        sampling_pip = GPI.get_gpi_wavelength_sampling(wavelength)
    else:
        sampling_pip = wavelength

    f = interp1d(wave, spec)
    # Interpolate the spectrum on GPI sampling and convert F_nu to F_lambda
    spec_pip = f(sampling_pip) / (sampling_pip**2)

    if 0:
        import matplotlib.pyplot as plt
        print((sampling_pip, spec_pip / np.nanmean(spec_pip)))
        plt.figure(2)
        wave_range = np.where((wave < sampling_pip[-1])
                              & (wave > sampling_pip[0]))
        plt.plot(wave[wave_range],
                 spec[wave_range] / np.nanmean(spec[wave_range]), 'r')
        plt.plot(sampling_pip, spec_pip / np.nanmean(spec_pip), 'b.')
        plt.show()

    return (sampling_pip, spec_pip / np.nanmean(spec_pip))
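
# Example call (a sketch; the .flx path is hypothetical): resample a Marley
# model spectrum onto the GPI H-band wavelength grid.
# wvs, planet_spec = get_planet_spectrum("/path/to/model.flx", "H")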
def get_star_spectrum(wvs_or_filter_name,
                      star_type=None,
                      temperature=None,
                      mute=None):
    """
    Get the spectrum of a star with a given spectral type by interpolating the pickles database.
    The spectrum is normalized to unit mean.
    It assumes a luminosity class V (main sequence) star.

    Inputs:
        wvs_or_filter_name: array of wavelengths in microns (or string with GPI band 'H', 'J', 'K1', 'K2', 'Y').
                (When using a GPI spectral band, wavelength samples are linearly spaced between the first and the last
                wavelength of the band.)
        star_type: 'A5','F4',... Ignored if temperature is defined.
                If star_type is longer than 2 characters it is truncated.
        temperature: temperature of the star. Overrides star_type if defined.
        mute: if True, suppress print statements. Defaults to False.

    Output:
        (wavelengths, spectrum) where
            wavelengths: Sampling in mum.
            spectrum: the spectrum of the star for the given band.
    """

    if mute is None:
        mute = False

    if isinstance(wvs_or_filter_name, str):
        import pyklip.instruments.GPI as GPI
        sampling_wvs = GPI.get_gpi_wavelength_sampling(wvs_or_filter_name)
    else:
        sampling_wvs = wvs_or_filter_name

    sampling_wvs_unique = np.unique(sampling_wvs)

    if star_type is None:
        return sampling_wvs, None

    if len(star_type) > 2:
        star_type_selec = star_type[0:2]
    else:
        star_type_selec = star_type

    try:
        int(star_type_selec[1])
    except:
        try:
            star_type_selec = star_type[-3:-1]
            int(star_type_selec[1])
        except:
            if not mute:
                print("Returning None. Couldn't parse spectral type.")
            return sampling_wvs, None

    # Sorry, hard-coded type...
    if star_type_selec == "K8":
        star_type_selec = "K7"

    pykliproot = os.path.dirname(os.path.realpath(__file__))
    filename_temp_lookup = pykliproot + os.path.sep + "pickles" + os.path.sep + "mainseq_colors.txt"
    filename_pickles_lookup = pykliproot + os.path.sep + "pickles" + os.path.sep + "AA_README"

    #a = np.genfromtxt(filename_temp_lookup, names=True, delimiter=' ', dtype=None)

    # The interpolation is based on the temperature of the star
    # If the input was not the temperature then it is taken from the mainseq_colors.txt based on the input spectral type
    if temperature is None:
        #Read pickles list
        dict_temp = dict()
        with open(filename_temp_lookup, 'r') as f:
            for line in f:
                if line.startswith('#'):
                    pass
                else:
                    splitted_line = line.split()
                    # splitted_line[0]: spectral type F5 G0...
                    # splitted_line[2]: Temperature in K
                    dict_temp[splitted_line[0]] = splitted_line[2]

        try:
            target_temp = float(dict_temp[star_type_selec])
        except:
            if not mute:
                print(
                    "Returning None. Couldn't find a temperature for this spectral type in pickles mainseq_colors.txt."
                )
            return sampling_wvs, None
    else:
        target_temp = temperature

    # "AA_README" contains the list of the temperature for which a spectrum is available
    # Read it here
    dict_filename = dict()
    temp_list = []
    with open(filename_pickles_lookup, 'r') as f:
        for line in f:
            if line.startswith('pickles_uk_'):
                splitted_line = line.split()
                # splitted_line[0]: Filename
                # splitted_line[1]: spectral type F5V G0III...
                # splitted_line[2]: Temperature in K

                # Keep only main-sequence (class V) spectra: the filename must end
                # with a digit and the spectral type must not end in a giant class (I, II, III, IV)
                spec_type = splitted_line[1]
                if splitted_line[0][len(splitted_line[0]) - 1].isdigit(
                ) and not (spec_type.endswith('IV')
                           or spec_type.endswith('I')):
                    dict_filename[float(splitted_line[2])] = splitted_line[0]
                    temp_list.append(float(splitted_line[2]))

    #temp_list = np.array(dict_filename.keys())
    temp_list = np.array(temp_list)
    # won't work for the hottest and coldest spectra.
    upper_temp, upper_temp_id = find_upper_nearest(temp_list, target_temp)
    lower_temp, lower_temp_id = find_lower_nearest(temp_list, target_temp)
    #print( upper_temp, upper_temp_id,lower_temp, lower_temp_id)

    upper_filename = dict_filename[upper_temp]
    lower_filename = dict_filename[lower_temp]

    upper_filename = pykliproot + os.path.sep + "pickles" + os.path.sep + upper_filename + ".fits"
    lower_filename = pykliproot + os.path.sep + "pickles" + os.path.sep + lower_filename + ".fits"

    hdulist = pyfits.open(upper_filename)
    cube = hdulist[1].data
    upper_wave = []
    upper_spec = []
    for wave_value, spec_value in cube:
        upper_wave.append(wave_value)  # in angstrom
        upper_spec.append(spec_value)
    delta_wave = upper_wave[1] - upper_wave[0]
    upper_wave = np.array(upper_wave) / 10**4  # in mum
    # upper_spec is a density spectrum in flux.A-1 so we need to multiply by delta_wave to integrate and get a flux.
    upper_spec = np.array(upper_spec) * delta_wave

    hdulist = pyfits.open(lower_filename)
    cube = hdulist[1].data
    lower_wave = []
    lower_spec = []
    for wave_value, spec_value in cube:
        lower_wave.append(wave_value)  # in angstrom
        lower_spec.append(spec_value)
    lower_wave = np.array(lower_wave) / 10**4  # in mum
    # lower_spec is a density spectrum in flux.A-1 so we need to multiply by delta_wave to integrate and get a flux
    # (delta_wave from the upper spectrum is reused, assuming both pickles files share the same wavelength sampling).
    lower_spec = np.array(lower_spec) * delta_wave

    sampling_wvs_unique0 = np.insert(sampling_wvs_unique[:-1], 0,
                                     sampling_wvs_unique[0])
    sampling_wvs_unique1 = np.insert(sampling_wvs_unique[1::], -1,
                                     sampling_wvs_unique[-1])
    upper_spec_unique = np.array([
        np.mean(upper_spec[np.where((upper_wave > wv0) * (upper_wave < wv1))])
        for wv0, wv1 in zip(sampling_wvs_unique0, sampling_wvs_unique1)
    ])
    lower_spec_unique = np.array([
        np.mean(lower_spec[np.where((lower_wave > wv0) * (lower_wave < wv1))])
        for wv0, wv1 in zip(sampling_wvs_unique0, sampling_wvs_unique1)
    ])

    # Sometimes the wavelength sampling is weird and the strategy above yields NaNs in the spectra.
    # When this happens we don't average the spectra but instead take the nearest available sample.
    for k in range(np.size(upper_spec_unique)):
        if np.isnan(upper_spec_unique[k]):
            upper_spec_unique[k] = upper_spec[find_nearest(
                upper_wave, sampling_wvs_unique[k])[1]]
    for k in range(np.size(lower_spec_unique)):
        if np.isnan(lower_spec_unique[k]):
            lower_spec_unique[k] = lower_spec[find_nearest(
                lower_wave, sampling_wvs_unique[k])[1]]

    spec_pip_unique = ((target_temp - lower_temp) * upper_spec_unique +
                       (upper_temp - target_temp) * lower_spec_unique) / (
                           upper_temp - lower_temp)

    f = interp1d(sampling_wvs_unique, spec_pip_unique)
    spec_pip = f(sampling_wvs)

    return (sampling_wvs, spec_pip / np.nanmean(spec_pip))
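
# Example call (a sketch): interpolate a pickles spectrum for an F5V star onto
# the GPI H-band sampling (requires the pickles files bundled with pyklip).
# wvs, star_spec = get_star_spectrum("H", star_type="F5V")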
Example #12
    x2 = x1
    y2 = y1 / np.cos(incl_rad)
    rho2dellip = np.sqrt(x2**2 + y2**2)

    phony_disk[np.where((rho2dellip > r1) & (rho2dellip < r2))] = 1

    return phony_disk / 100.
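
# Hypothetical usage of the reconstructed signature above (all values are
# illustrative only): an annulus between 40 and 60 pix at PA 27 deg and
# inclination 76 deg, on a 281x281 GPI-sized grid centered at (140, 140).
phony_disk_example = make_phony_disk(281, [140., 140.], np.radians(27.),
                                     np.radians(76.), 40., 60.)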


dir_test = '/Users/jmazoyer/Dropbox/Work/python/python_data/test_smallGPIlib_RDI/larger_test/'

aligned_center = [140., 140.]

lib_files = sorted(glob.glob(dir_test + '*.fits'))
# print(lib_files)
datasetlib = GPI.GPIData(lib_files, highpass=False, quiet=True)
datasetlib.spectral_collapse(collapse_channels=1,
                             align_frames=True,
                             aligned_center=aligned_center)

# make the PSF library
# we need to compute the correlation matrix of all images vs each other since we haven't computed it before
# psflib = rdi.PSFLibrary(datasetlib.input,aligned_center ,
#                             datasetlib.filenames, compute_correlation=True)

# # save the correlation matrix to disk so that we also don't need to recompute this ever again
# # In the future we can just pass in the correlation matrix into the PSFLibrary object rather
# # than having it compute it
# psflib.save_correlation(dir_test+"test_results/corr_matrix-SMALLTEST.fits", overwrite=True)

# read in the correlation matrix we already saved
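# A plausible continuation, sketched in because the original example breaks off
# here; the path mirrors the save_correlation() call commented out above, and
# correlation_matrix is a real keyword of pyklip's rdi.PSFLibrary.
corr_matrix = fits.getdata(dir_test + "test_results/corr_matrix-SMALLTEST.fits")
psflib = rdi.PSFLibrary(datasetlib.input, aligned_center,
                        datasetlib.filenames, correlation_matrix=corr_matrix)
# before a KLIP RDI reduction, one would then call psflib.prepare_library(dataset)
# with the science dataset to exclude its own frames from the library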
Example No. 13
def test_spectral_collapse():
    """
    Tests the spectral collapse feature
    """
    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    # hopefully there are still 3 files
    assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist, highpass=False)

    # collapse into 2 channels
    dataset.spectral_collapse(collapse_channels=2)

    assert (dataset.input.shape[0] == len(filelist) * 2)
    assert (np.size(dataset.spot_flux) == len(filelist) * 2)

    # collapse again, now into broadband
    dataset.spectral_collapse()

    assert (dataset.input.shape[0] == len(filelist))

    # run a broadband reduction
    outputdir = testdir
    prefix = "broadbandcollapse-betapic-j-k100a9s4m1-fakes50pa50"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1],
                              calibrate_flux=True,
                              mode="ADI",
                              lite=False,
                              highpass=True)

    # look at the output data. Validate the KL mode cube
    kl_hdulist = fits.open("{out}/{pre}-KLmodes-all.fits".format(out=outputdir,
                                                                 pre=prefix))
    klframe = kl_hdulist[1].data[0]

    # check beta pic b is where we think it is
    true_sep = 426.6 / 1e3 / GPI.GPIData.lenslet_scale  # in pixels
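    # (426.6 mas divided by GPI's ~14.166 mas/pix lenslet scale is roughly 30 pixels)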
    true_pa = 212.2  # degrees

    # find planet in collapsed cube
    flux_meas, x_meas, y_meas, fwhm_meas = fakes.retrieve_planet(
        klframe,
        dataset.output_centers[0],
        dataset.output_wcs[0],
        true_sep,
        true_pa,
        searchrad=4,
        guesspeak=2.e-5,
        guessfwhm=2)
    print(flux_meas, x_meas, y_meas, fwhm_meas)

    # positional error
    theta = fakes.convert_pa_to_image_polar(true_pa, dataset.output_wcs[0])
    true_x = true_sep * np.cos(np.radians(theta)) + dataset.output_centers[0,
                                                                           0]
    true_y = true_sep * np.sin(np.radians(theta)) + dataset.output_centers[0,
                                                                           1]
    assert np.abs(true_x - x_meas) < 0.4
    assert np.abs(true_y - y_meas) < 0.4
Example No. 14
# basedir = '/Users/jmazoyer/Dropbox/ExchangeFolder/data_python/tycho/'

basedir = os.environ["EXCHANGE_PATH"]  # the base directory where the data are stored

sequence = '20161221_H_Spec'
# sequence = '160318_H_Spec_smalltest'
# sequence = '150403_K1_Spec'
# sequence = '150403_K2_Spec'
# sequence = '160318_H_Spec'
# sequence = '160323_J_Spec'

basedir = basedir + sequence + '/'

filelist = glob.glob(basedir + "*_spdc_distorcorr.fits")

dataset = GPI.GPIData(filelist, quiet=True)

initial_wl_number = np.round(dataset.input.shape[0] / len(filelist))

## load the model. We don't care about the normalization of the model
# model_initial_non_convolved = fits.getdata("/Users/jmazoyer/Dropbox/STSCI/python/python_data/KlipFM_for_SPF/hr_4796/model_mcmc/Hband_modelbeforeConv.fits")
model_initial_non_convolved = fits.getdata(
    basedir +
    "results_MCMC/Hd32297_Hband_adikl15_backend_file_mcmc_BestModel.fits")

## either use the PSF made from the sat spots
# psf_by_wavelength = fits.getdata(basedir + sequence + '_sat_spots_averaged.fits')
# psf_all = np.nanmean(psf_by_wavelength, axis = 0)

## or the PSF made from the unobstructed image
# if sequence in ('140325_K1_Spec', '150402_K1_Spec', '150403_K1_Spec'):
Example No. 15
def do_fm_pyklip(modfm, dataset, new_model):
    import pyklip.instruments.GPI as GPI
    from pyklip import fm
    from pyklip.fmlib import diskfm
    # Define KLIP parameters used for data.
    # Settings are for a9s1mv1_medcollapse.
    ann = 9
    subs = 1
    mvmt = 1
    minrot = None
    highpass = False
    sufx = '_%s_%s_%slk' % (mctype, s_ident, which)
    kl = 1

    # NOTE that a9s1mv1_medcollapse used subset of images: 70-99 inclusive.
    fl = [
        path_data +
        'S20160228S%04d_spdc_distorcorr_phot_4p_hpNone_Jy_arcsec-2.fits' % ii
        for ii in range(70, 114)
    ]

    dataset = GPI.GPIData(fl, highpass=highpass, meas_satspot_flux=False)

    # Manually decreasing inner working angle to improve inner KLIP.
    dataset.IWA = 10  # [pix]
    dataset.OWA = 135
    # Manually set plate scale to best known value.
    dataset.lenslet_scale = 0.014166  # [arcsec/pix] best as of 6-2016
    numbasis = np.array([1, 2, 3, 10, 20, 50])
    maxnumbasis = 50

    star = np.array([140, 140])  #np.mean(dataset.centers, axis=0)
    collapse_spec = True

    # If desired, collapse the spec cube as sum of wavelength channels.
    if collapse_spec and dataset.prihdrs[0]['DISPERSR'] != 'WOLLASTON':
        input_collapsed = []
        # Sum each spec cube along the wavelength axis to collapse channels
        # (a nanmedian collapse is left commented out below as an alternative).
        for fn in fl:
            # input_collapsed.append(numpy.nanmedian(fits.getdata(fn), axis=0))
            input_collapsed.append(np.sum(fits.getdata(fn), axis=0))
        input_collapsed = np.array(input_collapsed)
        dataset.input = input_collapsed

        # Average centers of all wavelength slices and store as new centers.
        centers_collapsed = []
        sl = 0
        while sl < dataset.centers.shape[0]:
            centers_collapsed.append(
                np.mean(dataset.centers[sl:sl + 37], axis=0))
            sl += 37
        centers_collapsed = np.array(centers_collapsed)
        dataset.centers = centers_collapsed

        # Reduce dataset info from 37 slices to 1 slice.
        dataset.PAs = dataset.PAs[list(range(0, len(dataset.PAs), 37))]
        dataset.filenums = dataset.filenums[list(
            range(0, len(dataset.filenums), 37))]
        dataset.filenames = dataset.filenames[list(
            range(0, len(dataset.filenames), 37))]
        dataset.wcs = dataset.wcs[list(range(0, len(dataset.wcs), 37))]

        # Lie to pyklip about wavelengths.
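        # (with all wavelengths set equal, pyklip treats the collapsed frames as broadband data)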
        dataset.wvs = np.ones(input_collapsed.shape[0])

    # Create object from diskfm.DiskFM class.
    print("\nInitializing DiskFM object...")
    modfm = diskfm.DiskFM(dataset.input.shape,
                          np.array(numbasis),
                          dataset,
                          mod_I,
                          load_from_basis=load_from_basis,
                          save_basis=save_basis,
                          annuli=ann,
                          subsections=subs,
                          OWA=dataset.OWA,
                          basis_filename=basis_fn,
                          numthreads=numthreads)

    # TEMP!!!
    modfm.maxnumbasis = maxnumbasis
    # modfm.numthreads = numthreads

    if mvmt is not None:
        fname = 'hd35841_pyklipfm_a%ds%dmv%d_hp%.1f_k%d-%d' % (
            ann, subs, mvmt, highpass, numbasis[0], numbasis[-1]) + sufx
    elif minrot is not None:
        fname = 'hd35841_pyklipfm_a%ds%dmr%d_hp%.1f_k%d-%d' % (
            ann, subs, minrot, highpass, numbasis[0], numbasis[-1]) + sufx

    if load_from_basis:
        # # Set model's aligned center property (do usual swap of y,x).
        # modfm.aligned_center = mod_cen_aligned[::-1]

        # Use loaded basis vectors to FM the original disk model (get images grouped by KL mode).
        fmsub_mod_imgs = modfm.fm_parallelized()

        # # Save the fm output FITS to disk.
        # modfm.save_fmout(dataset, fmsub_mod_imgs, path[:-1], fname, numbasis, '', False, None)

        # Take mean across the FM'd images for each KL mode.
        fmsub_mod = np.mean(fmsub_mod_imgs, axis=1)
        # Mask interior to the IWA (pyklip includes r=IWA pixels in first annulus).
        fmsub_mod[:, radii_data < dataset.IWA] = np.nan
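        # (radii_data, a per-pixel separation map from the star, is assumed to be defined at module scope)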

        mod_I_fm = fmsub_mod[np.where(numbasis == kl)[0][0]]

    else:
        # FIX ME!!! FM without saved bases is likely broken.
        # pyklip FM the model dataset (similar yet distinct function from pyklip.klip_dataset)
        # This writes the self-subtracted model and the klip'd data to disk but does not
        # output any arguments.
        if ann == 1:
            padding = 0
        else:
            padding = 3

        print(
            "KLIP FM without a saved basis set is NOT FUNCTIONAL! Will probably fail."
        )
        fmout = fm.klip_dataset(
            dataset,
            modfm,
            mode='ADI',
            outputdir=path_data,
            fileprefix=fname,
            annuli=ann,
            subsections=subs,
            OWA=dataset.OWA,
            N_pix_sector=None,
            movement=mvmt,
            minrot=minrot,
            numbasis=np.array(numbasis),
            maxnumbasis=maxnumbasis,
            numthreads=numthreads,
            calibrate_flux=False,
            aligned_center=star[::-1],  #aligned_center=mod_cen_aligned[::-1]
            spectrum=None,
            highpass=highpass,
            save_klipped=False,
            padding=padding,
            mute_progression=False)

    # Update the model image in modfm object to a new model.
    modfm.update_disk(new_model)
    # # Load the KL basis info from log file instead of slowly recalculating.
    #    modfm.load_basis_files(modfm.basis_filename)
    # FM the new disk model.
    fmsub_mod_imgs = modfm.fm_parallelized()

    # # Save the fm output FITS to disk.
    # modfm.save_fmout(dataset, fmsub_mod_imgs, path[:-1], fname, numbasis, '', False, None)

    # Take mean across the FM'd images for each KL mode.
    fmsub_mod = np.nanmean(fmsub_mod_imgs, axis=1)
    # # Mask interior to the IWA (pyklip includes r=IWA pixels in first annulus).
    # fmsub_mod[:, radii < dataset.IWA] = numpy.nan

    return fmsub_mod
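
# A minimal sketch, not from the original script, of how do_fm_pyklip() might
# sit inside an MCMC likelihood: forward-model each proposed disk, then
# chi-square it against the KLIP-reduced data. The names data_klipped and
# noise_map are assumed (the reduced data frame and a per-pixel noise estimate).
def lnlike_disk(proposed_model, modfm, dataset, data_klipped, noise_map):
    fm_models = do_fm_pyklip(modfm, dataset, proposed_model)  # one frame per KL mode
    resid = (data_klipped - fm_models[0]) / noise_map  # compare the first KL mode
    return -0.5 * np.nansum(resid ** 2)  # Gaussian log-likelihood up to a constant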
Example No. 16
def test_fmastrometry():
    """
    Tests FM astrometry using MCMC + GP Regression

    """
    # time it
    t1 = time.time()

    # # open up already generated FM and data_stamp
    # fm_hdu = fits.open("/home/jwang/GPI/betapic/fm_models/final_altpsf/pyklip-131118-h-k100m4-dIWA8-nohp-klipfm-KL7cube.fits")
    # data_hdu = fits.open("/home/jwang/GPI/betapic/klipped/final_altpsf/pyklip-131118-h-k100m4-dIWA8-nohp-onezone-KL7cube.fits")

    ########### generate FM ############
    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    filelist.sort()

    # hopefully there are still 3 files
    assert (len(filelist) == 3)

    # only read in one spectral channel
    skipslices = [i for i in range(37) if i != 7 and i != 33]
    # read in data
    dataset = GPI.GPIData(filelist, highpass=9, skipslices=skipslices)

    numwvs = np.size(np.unique(dataset.wvs))
    assert (numwvs == 2)

    # save old centers for later
    oldcenters = np.copy(dataset.centers)

    # generate PSF
    dataset.generate_psfs(boxrad=25 // 2)
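    # normalize the sat-spot PSF stamps by the mean spot flux in each wavelength channel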
    dataset.psfs /= (np.mean(dataset.spot_flux.reshape(
        [dataset.spot_flux.shape[0] // numwvs, numwvs]),
                             axis=0)[:, None, None])

    # read in model spectrum
    model_file = os.path.join(testdir, "..", "pyklip", "spectra", "cloudy",
                              "t1600g100f2.flx")
    spec_dat = np.loadtxt(model_file)
    spec_wvs = spec_dat[1]
    spec_f = spec_dat[3]
    spec_interp = sinterp.interp1d(spec_wvs, spec_f, kind='nearest')
    inputspec = spec_interp(np.unique(dataset.wvs))

    # setup FM guesses
    numbasis = np.array([1, 7, 100])
    guesssep = 0.4267 / GPI.GPIData.lenslet_scale
    guesspa = 212.15
    guessflux = 5e-5
    print(guesssep, guesspa)
    fm_class = fmpsf.FMPlanetPSF(dataset.input.shape,
                                 numbasis,
                                 guesssep,
                                 guesspa,
                                 guessflux,
                                 dataset.psfs,
                                 np.unique(dataset.wvs),
                                 dataset.dn_per_contrast,
                                 star_spt='A6',
                                 spectrallib=[inputspec])
    # run KLIP-FM
    prefix = "betpic-131210-j-fmpsf"
    fm.klip_dataset(dataset,
                    fm_class,
                    outputdir=testdir,
                    fileprefix=prefix,
                    numbasis=numbasis,
                    annuli=[[guesssep - 15, guesssep + 15]],
                    subsections=1,
                    padding=0,
                    movement=2)

    # before we do anything else, check that dataset.centers remains unchanged
    assert (dataset.centers[0][0] == oldcenters[0][0])

    # read in outputs
    output_prefix = os.path.join(testdir, prefix)
    fm_hdu = fits.open(output_prefix + "-fmpsf-KLmodes-all.fits")
    data_hdu = fits.open(output_prefix + "-klipped-KLmodes-all.fits")

    # get FM frame
    fm_frame = np.nanmean(fm_hdu[1].data, axis=0)
    fm_centx = fm_hdu[1].header['PSFCENTX']
    fm_centy = fm_hdu[1].header['PSFCENTY']

    # get data_stamp frame
    data_frame = np.nanmean(data_hdu[1].data, axis=0)
    data_centx = data_hdu[1].header["PSFCENTX"]
    data_centy = data_hdu[1].header["PSFCENTY"]

    # get initial guesses
    guesssep = fm_hdu[0].header['FM_SEP']
    guesspa = fm_hdu[0].header['FM_PA']

    # create FM Astrometry object
    fma = fitpsf.FMAstrometry(guesssep, guesspa, 9)

    # generate FM stamp
    fma.generate_fm_stamp(fm_frame, [fm_centx, fm_centy], padding=5)

    # generate data_stamp stamp
    fma.generate_data_stamp(data_frame, [data_centx, data_centy], dr=6)

    # set kernel, with read noise
    fma.set_kernel("matern32", [3.], [r"$l$"], True, 0.05)
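    # (Matern 3/2 kernel with an initial correlation length of 3 pix; the last
    # two arguments enable a read-noise term scaled at 0.05)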

    # set bounds
    fma.set_bounds(1.5, 1.5, 1, [1.], 1)

    print(fma.guess_RA_offset, fma.guess_Dec_offset)
    print(fma.bounds)
    # test likelihood function
    mod_bounds = np.copy(fma.bounds)
    mod_bounds[2:] = np.log(mod_bounds[2:])
    print(mod_bounds)
    lnpos = fitpsf.lnprob((-16, -25.7, np.log(0.8), np.log(3.3), np.log(0.05)),
                          fma,
                          mod_bounds,
                          fma.covar,
                          readnoise=True)
    print(lnpos, np.nanmean(data_frame), np.nanmean(fm_frame),
          np.nanmean(fma.data_stamp), np.nanmean(fma.fm_stamp))
    assert lnpos > -np.inf

    # run MCMC fit
    fma.fit_astrometry(nburn=150, nsteps=25, nwalkers=50, numthreads=1)

    print("{0} seconds to run".format(time.time() - t1))

    fma.propogate_errs(star_center_err=0.05,
                       platescale=GPI.GPIData.lenslet_scale * 1000,
                       platescale_err=0.007,
                       pa_offset=-0.1,
                       pa_uncertainty=0.13)

    assert (np.abs(fma.RA_offset.bestfit - -227.2) < 5.)
    assert (np.abs(fma.Dec_offset.bestfit - -361.1) < 5.)

    fma.best_fit_and_residuals()
    plt.savefig("tests/bka2.png")