Example #1
def test_mock_SDI(mock_klip_parallelized):
    """
    Tests SDI reduction with mocked data. 

    Args: `
        mock_klip_parallelized: mock patch object. 
    """

    # create a mocked return value for klip_parallelized: a 4D array of shape (b, N, y, x) of zeros, plus the aligned image center
    mock_klip_parallelized.return_value = (np.zeros(
        (4, 111, 281, 281)), np.array([140, 140]))

    # time it
    t1 = time()

    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist)

    # run klip parallelized in SDI mode
    outputdir = testdir
    prefix = "mock"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=True,
                              mode="SDI")

    mocked_glob = glob.glob(testdir + 'mock*')
    assert (len(mocked_glob) == 5)

    print("{0} seconds to run".format(time() - t1))
Example #2
print("Enter the path to the working sliced directory (ending with 'sliced/'):")
dir = input()
print("Enter a name for the ouput file:")
name = input()
print("Enter number of annuli:")
annuli2 = input()
print("Enter movement parameter:")
movement2 = input()
filelist = glob.glob(str(dir) + "*.fits")
#filelist = glob.glob("spiral/sliced/*.fits")
dataset = MAGAO.MAGAOData(filelist)
#dataset = GPI.GPIData(filelist)

outputFileName = str(name)

parallelized.klip_dataset(dataset, outputdir="", fileprefix=outputFileName, annuli=annuli2, 
	subsections=1, movement=movement2, numbasis=[1,2,3,4,5,10,20,50,100], calibrate_flux=False, mode="ADI")

print("Shape of dataset.output is " + str(dataset.output.shape))
print("Shape of dataset.output[1] is " + str(dataset.output[1].shape))
avgframe = np.nanmean(dataset.output[1], axis=(0,1))
print("Shape of avgframe is " + str(avgframe.shape))
calib_frame = dataset.calibrate_output(avgframe)

print("Shape of calib_frame: " + str(calib_frame.shape))
#seps, contrast = klip.meas_contrast(calib_frame, dataset.IWA, 1.1/GPI.GPIData.lenslet_scale, 3.5)

print("Completed klipping. Rotating images")
hdulist = fits.open(outputFileName+"-KLmodes-all.fits")
cube = hdulist[1].data
hdulist.close()
cube = cube[:,:,::-1]
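
The flipped cube is never written back out in this snippet; a one-line completion (the output filename is a hypothetical choice) could be:

# save the x-flipped KL mode cube next to the original (hypothetical filename)
fits.writeto(outputFileName + "-KLmodes-all-flipped.fits", cube, overwrite=True)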
Example #3
def test_example_gpi_klip_dataset():
    """
    Tests standard pyklip.parallelized.klip_dataset() with GPI data from the tutorial. Uses no spectral template.

    """
    # time it
    t1 = time()

    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    # hopefully there are still 3 files
    assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist, highpass=True)

    # run klip parallelized
    outputdir = testdir
    prefix = "example-betapic-j-k100a9s4m1"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=True,
                              mode="ADI+SDI")

    # look at the output data. Validate the spectral cube
    spec_hdulist = fits.open("{out}/{pre}-KL20-speccube.fits".format(
        out=outputdir, pre=prefix))
    speccube_kl20 = spec_hdulist[1].data

    # check to make sure it's the right shape
    assert (speccube_kl20.shape == (37, 281, 281))

    # look at the output data. Validate the KL mode cube
    kl_hdulist = fits.open("{out}/{pre}-KLmodes-all.fits".format(out=outputdir,
                                                                 pre=prefix))
    klcube = kl_hdulist[1].data

    # check to make sure it's the right shape
    assert (klcube.shape == (4, 281, 281))

    # try to retrieve beta pic b.
    # True astrometry taken from Wang et al.(2016)
    true_sep = 426.6 / 1e3 / GPI.GPIData.lenslet_scale  # in pixels
    true_pa = 212.2  # degrees
    # guessing flux and FWHM
    true_flux = 1.7e-5
    true_fwhm = 2.3  # ~lambda/D for lambda=1.25 microns, D=8 m

    # find planet in collapsed cube
    collapsed_kl20 = klcube[1]
    flux_meas, x_meas, y_meas, fwhm_meas = fakes.retrieve_planet(
        collapsed_kl20,
        dataset.output_centers[0],
        dataset.output_wcs[0],
        true_sep,
        true_pa,
        searchrad=4,
        guesspeak=2.e-5,
        guessfwhm=2)
    print(flux_meas, x_meas, y_meas, fwhm_meas)

    # error thresholds
    # flux error
    assert np.abs((flux_meas - true_flux) / true_flux) < 0.4
    # positional error
    theta = fakes.convert_pa_to_image_polar(true_pa, dataset.output_wcs[0])
    true_x = true_sep * np.cos(np.radians(theta)) + dataset.output_centers[0,
                                                                           0]
    true_y = true_sep * np.sin(np.radians(theta)) + dataset.output_centers[0,
                                                                           1]
    assert np.abs(true_x - x_meas) < 0.4
    assert np.abs(true_y - y_meas) < 0.4
    # fwhm error
    assert np.abs(true_fwhm - fwhm_meas) < 0.4

    # measure SNR of planet
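    # A minimal sketch of one approach (not part of the original test): compare
    # the measured peak to the pixel scatter in an annulus at the planet's
    # separation, excluding a region around the planet itself.
    ydat, xdat = np.indices(collapsed_kl20.shape)
    rdat = np.sqrt((xdat - dataset.output_centers[0, 0])**2 +
                   (ydat - dataset.output_centers[0, 1])**2)
    in_annulus = np.abs(rdat - true_sep) < true_fwhm
    away_from_planet = np.sqrt((xdat - x_meas)**2 +
                               (ydat - y_meas)**2) > 2 * true_fwhm
    noise = np.nanstd(collapsed_kl20[in_annulus & away_from_planet])
    print("rough SNR = {0:.1f}".format(flux_meas / noise))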

    print("{0} seconds to run".format(time() - t1))
Example #4
def test_adi_gpi_klip_dataset_with_fakes_twice(filelist=None):
    """
    Tests ADI reduction with fakes injected at certain position angles, and checks that the reduction can be run twice on the same dataset object.

    Also tests lite mode.

    Args:
        filelist: if not None, supply files to test on. Otherwise use standard beta pic data
    """
    # time it
    t1 = time()

    # grab the files
    if filelist is None:
        filelist = glob.glob(testdir +
                             os.path.join("data", "S20131210*distorcorr.fits"))

        # hopefully there are still 3 files
        assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist,
                          skipslices=[0, 36],
                          bad_sat_spots=[3],
                          highpass=False)

    # save old centers for later
    oldcenters = np.copy(dataset.centers)

    dataset.generate_psfs(boxrad=25 // 2)
    assert np.max(dataset.psfs) > 0

    # inject fake planet
    fake_seps = [20, 50, 40, 30]  # pixels
    fake_pas = [-50, -165, 130, 10]  # degrees
    fake_contrasts = np.array([1.e-4, 3.e-5, 5.e-5, 1.e-4])  # bright planets
    fake_injected_fluxes = fake_contrasts * np.mean(dataset.dn_per_contrast)
    for fake_sep, fake_pa, fake_flux in zip(fake_seps, fake_pas,
                                            fake_injected_fluxes):
        fakes.inject_planet(dataset.input, dataset.centers, fake_flux,
                            dataset.wcs, fake_sep, fake_pa)

    # run klip parallelized
    outputdir = testdir
    prefix = "adionly-betapic-j-k100a9s4m1-fakes50pa50"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=False,
                              mode="ADI",
                              lite=True,
                              highpass=False)

    # before we do it again, check that dataset.centers remains unchanged
    assert (dataset.centers[0][0] == oldcenters[0][0])

    # And run it again to check that we can reuse the same dataset object
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1, 20, 50, 100],
                              calibrate_flux=True,
                              mode="ADI",
                              lite=False,
                              highpass=True)

    # look at the output data. Validate the spectral cube
    spec_hdulist = fits.open("{out}/{pre}-KL20-speccube.fits".format(
        out=outputdir, pre=prefix))
    speccube_kl20 = spec_hdulist[1].data

    # check to make sure it's the right shape
    assert (speccube_kl20.shape == (35, 281, 281))

    # look at the output data. Validate the KL mode cube
    spec_hdulist = fits.open("{out}/{pre}-KLmodes-all.fits".format(
        out=outputdir, pre=prefix))
    klcube = spec_hdulist[1].data

    # check to make sure it's the right shape
    assert (klcube.shape == (4, 281, 281))

    # collapse data
    collapsed_kl20 = klcube[1]

    # try to retrieve fake planet
    for fake_sep, fake_pa, fake_contrast in zip(fake_seps, fake_pas,
                                                fake_contrasts):
        peakflux = fakes.retrieve_planet_flux(collapsed_kl20,
                                              dataset.output_centers[0],
                                              dataset.output_wcs[0],
                                              fake_sep,
                                              fake_pa,
                                              refinefit=True)

        assert (np.abs((peakflux / 0.7 - fake_contrast) / fake_contrast) < 0.5)

    print("{0} seconds to run".format(time() - t1))
Example #5
                if (len([k for k in klmodes if not k in klmodes2]) == 0):
                    print(
                        "Found KLIP processed images for same parameters saved to disk. Reading in data."
                    )
                    #don't re-run KLIP
                    runKLIP = False

            if (runKLIP):
                print("Starting KLIP")
                #run klip for given parameters
                parallelized.klip_dataset(dataset,
                                          outputdir=(pathToFiles + "_klip/"),
                                          fileprefix=outputFileName,
                                          annuli=numAnn,
                                          subsections=s,
                                          movement=m,
                                          numbasis=klmodes,
                                          calibrate_flux=False,
                                          mode="ADI",
                                          highpass=highpass,
                                          time_collapse='median')

                #collapse in time dimension
                incube = np.nanmedian(dataset.output, axis=1)
                #truncates wavelength dimension, which we don't use
                incube = incube[:, 0, :, :]
                #print('check: input image shape goes from', dataset.output.shape, 'to', incube.shape)

            #list of noise calculation methods
            methods = ['stddev', 'med']
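
A hedged sketch (not from the original script) of what these two noise estimates might look like, assuming 'stddev' is a per-annulus standard deviation and 'med' a scaled median absolute deviation:

import numpy as np

def radial_noise(image, center, method='stddev'):
    # estimate noise in one-pixel-wide annuli around the given (x, y) center
    ydat, xdat = np.indices(image.shape)
    rdat = np.sqrt((xdat - center[0])**2 + (ydat - center[1])**2).astype(int)
    noise = np.zeros(rdat.max() + 1)
    for r in range(rdat.max() + 1):
        ring = image[rdat == r]
        if method == 'stddev':
            noise[r] = np.nanstd(ring)
        else:  # 'med': median absolute deviation scaled to a Gaussian sigma
            noise[r] = 1.4826 * np.nanmedian(np.abs(ring - np.nanmedian(ring)))
    return noise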
Example #6
def explore_params(path_to_files, outfile_name, iwa, klmodes, annuli_start, annuli_stop, movement_start,
    movement_stop, FWHM, ra, pa, wid, annuli_inc=1, movement_inc=1, subsections_start=False, subsections_stop=False, subsections_inc=False,
    smooth=False, input_contrast=False, time_collapse='median', highpass=True, owa=False,
    saveSNR=True, singleAnn=False, boundary=False, verbose=False, snrsmt=False,
    calibrate_flux=False):

    #default is 1 subsection
    if subsections_start == False:
        if (subsections_stop != False) or (subsections_inc != False):
            print("must set subsections_start, subsections_stop, and subsections_inc together")
            return
        subsections_start = 1
        subsections_stop = 1
        subsections_inc = 1

    #pre-klip smooth off = smoothing value of 0
    if smooth == False:
        smooth=0.0
    
    if verbose is True:
        print(f"File Path = {path_to_files}")   
        print()
        print(f"Output Filename = {outfile_name}")
        print("Parameters to explore:")
        print(f"Annuli: start = {annuli_start}; end = {annuli_stop}; increment = {annuli_inc}")
        print(f"Subsections: start = {subsections_start}; end = {subsections_stop}; increment = {subsections_inc} ")
        print(f"Movement: start = {movement_start}; end = {movement_stop}; increment = {movement_inc} ")
        print(f"IWA = {iwa}, KL Modes = {klmodes}, FWHM = {FWHM}, Smoothing Value = {smooth}")
        print()
        print("Planet Parameters")
        print(f"Radius= {ra}, Position Angle = {pa}, Mask Width = {wid}, Input Contrast - {input_contrast}") #, X Positions = {x_positions}, Y Positions = {y_positions} ")
        print()
        print("reading: " + path_to_files + "/*.fits")

    # create directory to save output to
    if not os.path.exists(path_to_files + "_klip"):
        os.makedirs(path_to_files + "_klip")
    
    # create tuples for easier eventual string formatting when saving files
    annuli = (annuli_start, annuli_stop, annuli_inc)
    movement = (movement_start, movement_stop, movement_inc)
    subsections = (subsections_start, subsections_stop, subsections_inc)

    # if only one parameter is iterated over, make sure increment is 1 and change tuple to single int
    if(annuli_start == annuli_stop):
        annuli_inc = 1
        annuli = annuli_start

    # if parameter is not set to change, make sure increment is 1 and change tuple to single int
    if(movement_start == movement_stop):
        movement_inc = 1
        movement = movement_start

    # if parameter is not set to change, make sure increment is 1 and change tuple to single int
    if(subsections_start == subsections_stop):
        subsections_inc = 1
        subsections = subsections_start

    # check that position angle and radius lists have the same number of elements
    if len(ra) != len(pa):
        print("List of separations is not equal in length to list of position angles. Duplicating to match.")
        ra=np.repeat(ra,len(pa))

    # object to hold mask parameters for snr map 
    mask = (ra, pa, wid)

    nplanets = len(ra)
    if verbose is True:
        print(nplanets, "planets with separations ", ra, "and PAs ", pa)
    
    # Add suffix to filenames depending on user-specified values
    suff = ''    
    if singleAnn is True:
        suff += '_min-annuli'
    
    if highpass is True:
        suff += '_highpass'

    if type(highpass)!=bool:
        suff+= '_hp'+str(highpass)

    if verbose is True:

        print("Reading: " + path_to_files + "/*.fits")

        start_time = time.time()
        print("Start clock time is", start_time)

        start_process_time = time.process_time()
        print("Start process time is", start_process_time)
        
    # grab generic header from a generic single image
    hdr = fits.getheader(path_to_files + '/sliced_1.fits')

    # erase values that change through the image cube
    del hdr['ROTOFF']
    try:
        del hdr['GSTPEAK']
    except KeyError:
        print('NOT a saturated dataset')
    del hdr['STARPEAK']
    
    # reads in files
    filelist = glob.glob(path_to_files + '/*.fits')

    # Get star peaks
    starpeak = []
    for i in np.arange(len(filelist)):
        head = fits.getheader(filelist[i])
        starpeak.append(head["STARPEAK"])

    dataset = MagAO.MagAOData(filelist)
    #make a clean copy of the dataset that will be pulled each time (parallelized modifies dataset.input object)
    dataset_input_clean = np.copy(dataset.input)

    palist = sorted(dataset._PAs)
    palist_clean = [pa if (pa < 360) else pa-360 for pa in palist]
    palist_clean_sorted = sorted(palist_clean)
    totrot = palist_clean_sorted[-1]-palist_clean_sorted[0]

    if verbose is True:
        print(f"total rotation for this dataset is {totrot} degrees")

    # set IWA and OWA
    dataset.IWA = iwa

    if owa is False:
        xDim = dataset._input.shape[2]
        yDim = dataset._input.shape[1]
        dataset.OWA = min(xDim,yDim)/2
        owa = dataset.OWA
    else:
        dataset.OWA = owa

    # Make function to write out data 
    def writeData(im, prihdr, annuli, movement, subsections, snrmap=False, pre=''):
        #function writes out fits files with important info captured in fits headers
        
        #if program iterates over several parameter values, formats these for fits headers and file names
        if (isinstance(annuli, tuple)):
            annuli_fname = str(annuli[0]) + '-' + str(annuli[1]) + 'x' + str(annuli[2])
            annuli_head = str(annuli[0]) + 'to' + str(annuli[1]) + 'by' + str(annuli[2])  
        else: 
            annuli_fname = annuli
            annuli_head = annuli

        if (isinstance(movement, tuple)):
            movement_fname = str(movement[0]) + '-' + str(movement[1]) + 'x' + str(movement[2])
            movement_head = str(movement[0]) + 'to' + str(movement[1]) + 'by' + str(movement[2])
        else: 
            movement_head = movement
            movement_fname = movement

        if (isinstance(subsections, tuple)):
            subsections_head = str(subsections[0]) + 'to' + str(subsections[1]) + 'by' + str(subsections[2])
            subsections_fname = str(subsections[0]) + '-' + str(subsections[1]) + '-' + str(subsections[2])
        else:
            subsections_head = subsections
            subsections_fname = subsections


        #shortens file path to bottom 4 directories so it will fit in fits header
        try:
            path_to_files_short = '/'.join(path_to_files.split(os.path.sep)[-4:])
        except Exception:
            path_to_files_short = path_to_files
                
        #adds info to fits headers
        prihdr['ANNULI']=str(annuli_head)
        prihdr['MOVEMENT']=str(movement_head)
        prihdr['SUBSCTNS']=str(subsections_head)
        prihdr['IWA'] = str(iwa)
        prihdr['KLMODES']=str(klmodes)
        prihdr['FILEPATH']=str(path_to_files_short)
        prihdr['OWA']=str(dataset.OWA)
        prihdr['TIMECOLL']=str(time_collapse)
        prihdr['CALIBFLUX']=str(calibrate_flux)
        prihdr["HIGHPASS"]=str(highpass)

    
        if(snrmap):
            rad, pa, wid = mask 
            prihdr['MASK_RAD']=str(rad)
            prihdr['MASK_PA']=str(pa)
            prihdr['MASK_WID']=str(wid)
            prihdr['SNRSMTH']=str(smooth)
            prihdr['SNRFWHM']=str(FWHM)

        if isinstance(annuli, tuple):
            prihdr["SLICE1"]="planet peak value under mask in standard deviation noise map"
            prihdr["SLICE2"] = "planet peak value under mask in median absolute value noise map"
            prihdr["SLICE3"] = "average value of positive pixels under mask in standard deviation noise map"
            prihdr["SLICE4"] = "average value of positive pixels under mask in median absolute value noise map"
            prihdr["SLICE5"] = "total number of pixels >5sigma outside of mask in standard deviation noise map"
            prihdr["SLICE6"] = "total number of pixels >5sigma outside of mask in median absolute value noise map"
            prihdr["SLICE7"] = "total number of pixels >5sigma outside of mask and at similar radius in standard deviation noise map"
            prihdr["SLICE8"] = "total number of pixels >5sigma outside of mask and at similar radius in median absolute value noise map"
            prihdr["SLICE9"] = "calibrated contrast value of planet/s at a given separation"

        #writes out files
        fits.writeto(str(path_to_files) + "_klip/" + str(pre)  + outfile_name + "_a" + str(annuli_fname) + "m" + str(
            movement_fname) + "s" + str(subsections_fname) + "iwa" + str(iwa) + suff + '-KLmodes-all.fits', im, prihdr, overwrite=True)

        return


    # create cube to eventually hold parameter explorer data
    PECube = np.zeros((9,int((subsections_stop-subsections_start)/subsections_inc+1), len(klmodes), int(nplanets),
                        int((annuli_stop-annuli_start)/annuli_inc+1),
                        int((movement_stop-movement_start)/movement_inc+1)))
    
    # BEGIN LOOPS OVER ANNULI, MOVEMENT AND SUBSECTION PARAMETERS
    
    # used for indexing: keeps track of number of annuli values that have been tested
    acount = 0
    
    for a in range(annuli_start, annuli_stop+1, annuli_inc):
    
        # calculate size of annular zones
        dr = float(owa-iwa)/a

        # creates list of zone radii
        all_bounds = [dr*rad+iwa for rad in range(a+1)]

        planet_annuli = [b for b in all_bounds if (b < ra[-1]+dr) and (b > ra[0])]
        nplanet_anns = len(planet_annuli)

        ann_cen_rad = [b - dr/2 for b in planet_annuli]

        if verbose is True:
            print("planets span ", nplanet_anns, "annular zones for annuli = ", a)

        # print('annuli bounds are', all_bounds)
        numAnn = a
        
        if(singleAnn):
            #find maximum annulus boundary radius that is still inside innermost planet injection radius
            lowBound = max([b for b in all_bounds if (min(ra)>b)])
            #find minimum exterior boundary radius that is outside outermost planet injection radius
            upBound = min([b for b in all_bounds if (max(ra)<b)])
            #list of zone boundaries for planets between the two bounds
            all_bounds = [b for b in all_bounds if (b>=lowBound and b<=upBound)]
            numAnn = int(round((upBound-lowBound)/dr))
            #reset iwa and owa to correspond to annulus
            dataset.IWA = lowBound
            dataset.OWA = upBound
    
        #if boundary keyword is set, check to see if any planets are too close to annuli boundaries
        if boundary != False:
            #is planet within +/- number set as boundary pixels
            if not (len( [b for b in all_bounds for r in ra if(b <= r+boundary and b >= r-boundary)] ) == 0):
                print([b for b in all_bounds for r in ra if(b <= r+boundary and b >= r-boundary)])
                print("A planet is near annulus boundary; skipping KLIP for annuli = " + str(a))
                #assign a unique value as a flag for these cases in the parameter explorer map
                PECube[:,:,:,:,acount,:] = np.nan
                #move on to the next annuli value before KLIPing
                acount += 1
                continue

        # used for indexing: keeps track of number of movement values that have been tested
        mcount = 0
    
        for m in tqdm(np.arange(movement_start, movement_stop+1, movement_inc)):

            # figure out whether there is enough field rotation for this movement parameter

            if np.arctan(m/ann_cen_rad[0])*180/np.pi > totrot:
                if verbose is True:
                    print("movement", m, "= %5.1f" % (np.arctan(m/ann_cen_rad[0])*180/np.pi),
                          "deg. for inner planet annulus. Only %5.1f" % (totrot),
                          "deg. available. Skipping this movement/annuli combo.")
                PECube[:,:,:,:,acount,mcount] = np.nan
                mcount+=1
                continue

            else:
                scount = 0
        
                for s in range(subsections_start, subsections_stop+1, subsections_inc):

                    klipstr = "_a" + str(a) + "m" + str(m) + "s" + str(s) + "iwa" + str(iwa) 
                    fname  = str(path_to_files) + "_klip/" + outfile_name + klipstr+ suff + '-KLmodes-all.fits'

                    if verbose is True:
                        if(singleAnn):
                            print("Parameters: movement = %s; subsections = %d" % (m, s))
                            print("Running for %d annuli, equivalent to single annulus of width %s pixels" % (annuli_start + acount, dr))
                        else:
                            print("Parameters: annuli = %d; movement = %s; subsections = %d" % (a, m, s))
            
                        # create cube to hold snr maps 
                        #snrMapCube = np.zeros((2,len(klmodes),yDim,xDim))
                    runKLIP = True
                    
                    if os.path.isfile(fname):
                        print(outfile_name+klipstr+suff, fname)
                        incube = fits.getdata(fname)
                        head = fits.getheader(fname)
                        klmodes2 = head['KLMODES'][1:-1]
                        klmodes2 = list(map(int, klmodes2.split(",")))
        
                        if (len([k for k in klmodes if not k in klmodes2]) == 0):
                            if verbose is True:
                                print("Found KLIP processed images for same parameters saved to disk. Reading in data.")
                            #don't re-run KLIP
                            runKLIP = False
        
                    if (runKLIP):
                        if verbose is True:
                            print("Starting KLIP")
                        #run klip for given parameters
                        #read in a fresh copy of dataset so no compound highpass filtering
                        dataset.input = dataset_input_clean
                        parallelized.klip_dataset(dataset, outputdir=(path_to_files + "_klip/"), fileprefix=outfile_name+klipstr+suff, 
                            annuli=numAnn, subsections=s, movement=m, numbasis=klmodes, calibrate_flux=calibrate_flux, 
                            mode="ADI", highpass = highpass, time_collapse=time_collapse, verbose = verbose)

                        #read in the final image and header
                        print(outfile_name+klipstr+suff, fname)
                        #read in file that was created so can add to header
                        incube = fits.getdata(fname)
                        head = fits.getheader(fname)

                        #add KLMODES keyword to header
                        #this also has the effect of giving the file a single header instead of pyklip's double
                        head["KLMODES"]=str(klmodes)
                        fits.writeto(fname, incube, head, overwrite=True)
                    
                    if input_contrast is not False:
                        dataset_copy = np.copy(incube)
                    
                        # Find planet x and y positions from pa and sep
                        x_positions = [r*np.cos(np.radians(p+90)) + dataset.centers[0][0] for r, p in zip(ra, pa)]
                        y_positions = [r*np.sin(np.radians(p+90)) + dataset.centers[0][1] for r, p in zip(ra, pa)]
                    
                        # Loop through kl modes
                        cont_meas = np.zeros((len(klmodes), 1))
                        for k in range(len(klmodes)):
                        
                            dataset_contunits = dataset_copy[k]/np.median(starpeak)

                            if runKLIP is False:
                                w = wcsgen.generate_wcs(parangs = 0, center = dataset.centers[0])
                            else:
                                w = dataset.output_wcs[0]
                            

                            # Retrieve flux of injected planet
                            planet_fluxes = []
                            for sep, p in zip(ra, pa):
                                fake_flux = fakes.retrieve_planet_flux(dataset_contunits, dataset.centers[0], w, sep, p, searchrad=7)
                                planet_fluxes.append(fake_flux)

                        
                            # Calculate the throughput
                            tpt = np.array(planet_fluxes)/np.array(input_contrast)
                            

                            # index arrays matching the shape of the KL mode frame
                            ydat, xdat = np.indices(dataset_contunits.shape)

                        

                            # Mask the planets
                            for x, y in zip(x_positions, y_positions):

                                # distance of each pixel from this injected planet
                                distance_from_star = np.sqrt((xdat - x) ** 2 + (ydat - y) ** 2)

                                # Mask
                                dataset_contunits[np.where(distance_from_star <= 2 * FWHM)] = np.nan
                                masked_cube = dataset_contunits

                            # Measure the raw contrast
                            contrast_seps, contrast = klip.meas_contrast(dat=masked_cube, iwa=iwa, owa=dataset.OWA, resolution=(7), center=dataset.centers[0], low_pass_filter=True)

                            # Find the contrast to be used 
                            use_contrast = np.interp(np.median(ra), contrast_seps, contrast)
                            

                            # Calibrate the contrast
                            cal_contrast = use_contrast/np.median(tpt)
                            cont_meas[k] = -cal_contrast
                            
        
                    # make SNR maps
                    snrmaps, peaksnr, snrsums, snrspurious = snr.create_map(fname, FWHM, smooth=snrsmt, planets=mask, saveOutput=False, sigma=5, checkmask=False, verbose=verbose)

                    PECube[0:2, scount, :, :, acount, mcount] = peaksnr
                    PECube[2:4, scount, :, :, acount, mcount] = snrsums
                    PECube[4:6, scount, :, :, acount, mcount] = snrspurious[:,:,None,0]
                    PECube[6:8, scount, :, :, acount, mcount] = snrspurious[:,:,None,1]
                    if input_contrast is not False:
                        PECube[8, scount, :, :, acount, mcount] = cont_meas

                    if runKLIP and np.nanmedian(peaksnr) > 3:
                        writeData(incube, hdr, a, m, s)
                        if verbose is True:
                            print("Median peak SNR > 3. Writing median image combinations to " + path_to_files + "_klip/")
                        
                    if saveSNR is True:
                        writeData(snrmaps, hdr, a, m, s, snrmap = True, pre = 'snrmap_')
                        if verbose is True:
                            print("Writing SNR maps to " + path_to_files + "_klip/")
        
                
                    scount+=1
                mcount+=1                
        acount+=1
    
    if verbose is True:        
        print("Writing parameter explorer file to " + path_to_files + "_klip/")

    #write parameter explorer cube to disk
    writeData(PECube, hdr, annuli, movement, subsections, snrmap = True, pre = 'paramexplore_')

    if verbose is True: 
        print("KLIP automation complete")    
        print("End clock time is", time.time())
        print("End process time is", time.process_time())
        print("Total clock runtime: ", time.time()- start_time)
        print("Total process runtime:", time.process_time()-start_process_time)

    return PECube
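
A minimal usage sketch for explore_params (the data directory, planet parameters, and grid bounds below are illustrative assumptions, not values from the original):

# hypothetical call: explore a 3x3 grid of annuli and movement values for one
# injected planet at separation 25 px, PA 130 deg, contrast 1e-4
PECube = explore_params("HD142527/sliced", "hd142527", iwa=10,
                        klmodes=[1, 5, 10, 20, 50],
                        annuli_start=4, annuli_stop=6,
                        movement_start=1, movement_stop=3,
                        FWHM=4.0, ra=[25], pa=[130], wid=10,
                        input_contrast=[1e-4], verbose=True)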
Example #7
def test_spectral_collapse():
    """
    Tests the spectral collapse feature
    """
    # grab the files
    filelist = glob.glob(testdir +
                         os.path.join("data", "S20131210*distorcorr.fits"))
    # hopefully there are still 3 files
    assert (len(filelist) == 3)

    # create the dataset object
    dataset = GPI.GPIData(filelist, highpass=False)

    # collapse into 2 channels
    dataset.spectral_collapse(collapse_channels=2)

    assert (dataset.input.shape[0] == len(filelist) * 2)
    assert (np.size(dataset.spot_flux) == len(filelist) * 2)

    # collapse again, now into broadband
    dataset.spectral_collapse()

    assert (dataset.input.shape[0] == len(filelist))

    # run a broadband reduction
    outputdir = testdir
    prefix = "broadbandcollapse-betapic-j-k100a9s4m1-fakes50pa50"
    parallelized.klip_dataset(dataset,
                              outputdir=outputdir,
                              fileprefix=prefix,
                              annuli=9,
                              subsections=4,
                              movement=1,
                              numbasis=[1],
                              calibrate_flux=True,
                              mode="ADI",
                              lite=False,
                              highpass=True)

    # look at the output data. Validate the KL mode cube
    kl_hdulist = fits.open("{out}/{pre}-KLmodes-all.fits".format(out=outputdir,
                                                                 pre=prefix))
    klframe = kl_hdulist[1].data[0]

    # check beta pic b is where we think it is
    true_sep = 426.6 / 1e3 / GPI.GPIData.lenslet_scale  # in pixels
    true_pa = 212.2  # degrees

    # find planet in collapsed cube
    flux_meas, x_meas, y_meas, fwhm_meas = fakes.retrieve_planet(
        klframe,
        dataset.output_centers[0],
        dataset.output_wcs[0],
        true_sep,
        true_pa,
        searchrad=4,
        guesspeak=2.e-5,
        guessfwhm=2)
    print(flux_meas, x_meas, y_meas, fwhm_meas)

    # positional error
    theta = fakes.convert_pa_to_image_polar(true_pa, dataset.output_wcs[0])
    true_x = true_sep * np.cos(np.radians(theta)) + dataset.output_centers[0,
                                                                           0]
    true_y = true_sep * np.sin(np.radians(theta)) + dataset.output_centers[0,
                                                                           1]
    assert np.abs(true_x - x_meas) < 0.4
    assert np.abs(true_y - y_meas) < 0.4
Example #8
import glob

import pyklip.instruments.GPI as GPI
import pyklip.instruments.MAGAO as MAGAO
import pyklip.parallelized as parallelized
import numpy as np
import pyklip.klip as klip
from astropy.io import fits

#filelist = glob.glob("20141218_H_Spec/*.fits")
filelist = glob.glob("../HD142527/HD142527/8Apr14/MERGED_long_sets/sliced/*.fits")
#filelist = glob.glob("spiral/sliced/*.fits")
dataset = MAGAO.MAGAOData(filelist)
#dataset = GPI.GPIData(filelist)

outputFileName = "tutorialObject"

parallelized.klip_dataset(dataset, outputdir="", fileprefix=outputFileName, annuli=1, subsections=1, movement=1, numbasis=[1,2,3,4,5,10,20,50,100], calibrate_flux=False, mode="ADI")

print("Shape of dataset.output is " + str(dataset.output.shape))
print("Shape of dataset.output[1] is " + str(dataset.output[1].shape))
avgframe = np.nanmean(dataset.output[1], axis=(0,1))
print("Shape of avgframe is " + str(avgframe.shape))
calib_frame = dataset.calibrate_output(avgframe)

print("Shape of calib_frame: " + str(calib_frame.shape))
#seps, contrast = klip.meas_contrast(calib_frame, dataset.IWA, 1.1/GPI.GPIData.lenslet_scale, 3.5)

print("Completed klipping. Rotating images")
hdulist = fits.open(outputFileName+"-KLmodes-all.fits")
cube = hdulist[1].data
hdulist.close()
cube = cube[:,:,::-1]
Example #9
def test_p1640_tutorial(mock_klip_parallelized):
    """
    Tests P1640 support by running through the P1640 tutorial without the interactive parts.

    Follows the P1640 tutorial in the docs, using the tutorial as a guideline. Goes through downloading the
    sample tarball, extracting the datacubes, fitting the grid spots, running KLIP on the datacubes, and outputting
    the files. The test checks that each step outputs the correct number of files to the correct directories.
    It skips all interactive steps, such as vetting the cubes and grid spots.
    
    """

    #create a mocked klip parallelized
    mock_klip_parallelized.return_value = (np.zeros((3, 96, 281, 281)), np.array([140, 140]))

    directory = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + os.path.join('..', 'pyklip', 'instruments',
                                                                                        'P1640_support', 'tutorial')
    tarball_get = 'wget https://sites.google.com/site/aguilarja/otherstuff/pyklip-tutorial-data/P1640_tutorial_data' \
                  '.tar.gz '
    tarball_command = 'tar -xvf P1640_tutorial_data.tar.gz'

    # time it
    t1 = time()

    os.system(tarball_get)
    os.system(tarball_command)
    filelist = glob.glob("*Occulted*fits")
    # should have 3 files in the directory after downloading and unzipping the tarball.
    assert (len(filelist) == 3)

    # Ignoring interactive vet the datacubes.
    good_cubes = []
    for file in filelist:
        good_cubes.append(os.path.abspath(file))

    # Fit grid spots
    import pyklip.instruments.P1640_support.P1640spots as P1640spots
    spot_filepath = directory + os.path.sep + 'shared_spot_folder/'
    spot_filesuffix = '-spot'
    spot_fileext = 'csv'
    for test_file in good_cubes:
        spot_positions = P1640spots.get_single_file_spot_positions(test_file, rotated_spots=False)
        P1640spots.write_spots_to_file(test_file, spot_positions, spot_filepath,
                                       spotid=spot_filesuffix, ext=spot_fileext, overwrite=False)
    # should have 12 csv files outputted
    test1 = glob.glob("%stutorial*" % spot_filepath)
    assert (len(test1) == 12)

    # Again, ignoring interactive vet grid spots
    # run KLIP in SDI mode
    import pyklip.instruments.P1640 as P1640
    import pyklip.parallelized as parallelized
    dataset = P1640.P1640Data(filelist, spot_directory=spot_filepath)
    output = directory + os.path.sep + "output/"
    parallelized.klip_dataset(dataset, outputdir=output, fileprefix="woohoo", annuli=5, subsections=4, movement=3,
                              numbasis=[1, 20, 100], calibrate_flux=False, mode="SDI")
    # should have 4 outputted files
    p1640_globbed = glob.glob(output + "*")
    assert (len(p1640_globbed) == 4)

    print("{0} seconds to run".format(time() - t1))
    # fits.writeto("/Users/jmazoyer/Desktop/aperture.fits", aperture , overwrite=True)
    # mask_zone_spectrai = mask_zone_spectra[i,:,:]
    # toto[i,:,:] = (1 + 2*mask_zone_spectra[i,:,:])*convolve(model_initial_non_convolved,psf_by_wavelength[i,:,:], boundary = 'wrap')
    # print(convolve(model_initial_non_convolved,psf_by_wavelength[i,:,:], boundary = 'wrap')[np.where(mask_zone_spectrai)])

# fits.writeto("/Users/jmazoyer/Desktop/show_zone_on_model.fits", toto , overwrite=True)

#### First reduction of the data using parallelized.klip_dataset (no FM)
blockPrint()
parallelized.klip_dataset(dataset,
                          numbasis=KLMODE,
                          maxnumbasis=100,
                          annuli=1,
                          subsections=1,
                          mode='ADI',
                          outputdir=resultdir,
                          fileprefix=file_prefix_all +
                          '_Measurewithparallelized',
                          aligned_center=[140, 140],
                          highpass=False,
                          minrot=mov_here,
                          calibrate_flux=True)
enablePrint()

if nb_wl == 1:
    data_reduction = fits.getdata(resultdir + file_prefix_all +
                                  "_Measurewithparallelized-KLmodes-all.fits")
    # fits.writeto("/Users/jmazoyer/Desktop/show_zone_on_data.fits", (1 + 2*mask_zone_spectra)*data_reduction, overwrite=True)

else:
    data_reduction = fits.getdata(resultdir + file_prefix_all +
Example #11
        def multiple_planet_injection(datadir, filtername, seps, input_pas,
                                      num_datasets, input_contrasts, mode):
            """
            Injects multiple fake planets across multiple datasets.

            Args:
                datadir (str): The name of the directory that the data is contained in
                filtername (str): The name of the filter to be used
                seps (list: int): List of separations each planet should be injected at
                input_pas (list: int): List of position angles to inject fake planets at
                num_datasets (int): The number of datasets to be generated. This is equal to the number of iterations of planet injection/number of position angle changes
                input_contrasts (list: float): List of contrasts planets should be injected at
                mode (str): Observing mode for the reduction (e.g. 'RDI' or 'ADI')
            Returns:
                retrieved_fluxes_all (list): All retrieved planet fluxes
                pas_all (list): All position angles used for injection
                planet_seps_all (list): All planet separations used for injection
                input_contrasts_all (list): All planet contrasts used for injection
            """

            pas_all = []
            retrieved_fluxes_all = []
            planet_seps_all = []
            input_contrasts_all = []

            # Generate desired number of datasets: number of loops at each separation
            datasets, psflibs = generate_datasets(
                datadir,
                roll_filenames_list=roll_filenames_list,
                ref_filenames_list=ref_filenames_list,
                rollnames_list=rollnames_list,
                pas_list=pas_list,
                mode=mode,
                num_datasets=num_datasets)

            # Begin fake planet injection and retrieval, changing position angle each time
            for dataset_num, dataset, psflib in zip(range(len(datasets)),
                                                    datasets, psflibs):
                if mode == 'RDI':
                    psflib.prepare_library(dataset)

                # Create stamps of the point spread function to be injected as a fake planet
                psf_stamp_input = np.array([psf_stamp for j in range(12)])

                # Clock the position angles of the injected planets by 40 degrees each time
                input_pas = [x + 40 * dataset_num for x in input_pas]

                start_over = False

                # Inject fake planets
                for input_contrast, sep, pa in zip(input_contrasts, seps,
                                                   input_pas):

                    # Check the distance between the planet to be injected and the real planets. We don't want to inject fake planets too close to the two planets already in the data.
                    if x_positions is not None:
                        check_sep_x = sep * np.cos(np.radians(pa + 90))
                        check_sep_y = sep * np.sin(np.radians(pa + 90))
                        dist_p1 = np.sqrt((check_sep_x - x_positions[0])**2 +
                                          (check_sep_y - y_positions[0])**2)
                        dist_p2 = np.sqrt((check_sep_x - x_positions[1])**2 +
                                          (check_sep_y - y_positions[1])**2)

                        # Make sure fake planets won't be injected within a 12 pixel radius of the real planets
                        if dist_p1 > 12 and dist_p2 > 12:

                            planet_fluxes = psf_stamp_input * input_contrast
                            fakes.inject_planet(frames=dataset.input,
                                                centers=dataset.centers,
                                                inputflux=planet_fluxes,
                                                astr_hdrs=dataset.wcs,
                                                radius=sep,
                                                pa=pa,
                                                field_dependent_correction=
                                                transmission_corrected)

                        # If the fake planet to be injected is within a 12 pixel radius of the real planets, start the loop over
                        else:
                            start_over = True

                    elif x_positions is None:
                        planet_fluxes = psf_stamp_input * input_contrast
                        fakes.inject_planet(
                            frames=dataset.input,
                            centers=dataset.centers,
                            inputflux=planet_fluxes,
                            astr_hdrs=dataset.wcs,
                            radius=sep,
                            pa=pa,
                            field_dependent_correction=transmission_corrected)

                    if start_over:
                        continue

                # Run KLIP on datasets with injected planets: Set output directory
                outputdir = "notebooks/contrastcurves"
                fileprefix = f"FAKE_KLIP_{mode}_A9K5S4M1_{str(dataset_num)}{str(n_sep_loops)}"
                filename = f"FAKE_KLIP_{mode}_A9K5S4M1_{str(dataset_num)}{str(n_sep_loops)}-KLmodes-all.fits"

                # Run KLIP
                parallelized.klip_dataset(dataset,
                                          outputdir=outputdir,
                                          fileprefix=fileprefix,
                                          algo="klip",
                                          annuli=annuli,
                                          subsections=subsections,
                                          minrot=minrot,
                                          numbasis=numbasis,
                                          mode=mode,
                                          verbose=False,
                                          psf_library=psflib)

                # Open one frame of the KLIP-ed dataset
                klipdataset = os.path.join(outputdir, filename)
                with fits.open(klipdataset) as hdulist:
                    outputfile = hdulist[0].data
                    outputfile_centers = [
                        hdulist[0].header["PSFCENTX"],
                        hdulist[0].header["PSFCENTY"]
                    ]
                outputfile_frame = outputfile[2]

                # Retrieve planet fluxes
                retrieved_planet_fluxes = []
                for input_contrast, sep, pa in zip(input_contrasts, seps,
                                                   input_pas):

                    fake_flux = fakes.retrieve_planet_flux(
                        frames=outputfile_frame,
                        centers=outputfile_centers,
                        astr_hdrs=dataset.output_wcs[0],
                        sep=sep,
                        pa=pa,
                        searchrad=7)
                    retrieved_planet_fluxes.append(fake_flux)
                retrieved_fluxes_all.extend(retrieved_planet_fluxes)
                pas_all.extend(input_pas)
                planet_seps_all.extend(seps)
                input_contrasts_all.extend(input_contrasts)

            return retrieved_fluxes_all, pas_all, planet_seps_all, input_contrasts_all
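
A hedged sketch of how the returned lists might be reduced to per-injection throughput (converting contrast to injected peak flux via psf_stamp mirrors the injection above and is an assumption):

import numpy as np

# hypothetical post-processing of multiple_planet_injection outputs:
# ratio of recovered peak flux to injected peak flux for each fake planet
fluxes, pas_out, seps_out, contrasts_out = multiple_planet_injection(
    datadir, filtername, seps, input_pas, num_datasets, input_contrasts, mode)
throughput = np.array(fluxes) / (np.array(contrasts_out) * np.max(psf_stamp))
print("median KLIP throughput:", np.nanmedian(throughput))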
Example #12
            datadir,
            roll_filenames_list=roll_filenames_list,
            ref_filenames_list=ref_filenames_list,
            rollnames_list=rollnames_list,
            pas_list=pas_list,
            mode=mode)

        if mode == 'RDI':
            psflib.prepare_library(dataset)

        # Run pyKLIP RDI
        parallelized.klip_dataset(dataset,
                                  outputdir=outputdir,
                                  fileprefix=fileprefix,
                                  annuli=annuli,
                                  subsections=subsections,
                                  numbasis=numbasis,
                                  minrot=minrot,
                                  mode=mode,
                                  psf_library=psflib)

        # Read in the KLIP-ed dataset
        filesuffix = "-KLmodes-all.fits"

        with fits.open(f"{fileprefix}{filesuffix}") as hdulist:
            reduced_cube = hdulist[0].data
            reduced_centers = [
                hdulist[0].header["PSFCENTX"], hdulist[0].header["PSFCENTY"]
            ]

Example #13
                    str(pathToFiles) + "_klip/med_" + outputFileName + "_a" + str(a) + "m" + str(m) + "s" + str(
                        s) + "iwa" + str(iwa) + '_klmodes-all.fits')
                klmodes2 = hdulist[0].header['klmodes'][1:-1]
                klmodes2 = list(map(int, klmodes2.split(",")))

                if (len([k for k in klmodes if not k in klmodes2]) == 0):
                    print("Found KLIP processed images for same parameters saved to disk. Reading in data.")
                    runKLIP = False
                    for i in range(len(klmodes)):
                        cube[i, :, :] = hdulist[0].data[klmodes2.index(klmodes[i]), :, :]

            if (runKLIP):
                print("Starting KLIP")
                # run klip for given parameters
                parallelized.klip_dataset(dataset, outputdir=(pathToFiles + "_klip/"),
                                          fileprefix=str(outputFileName), annuli=a, subsections=s, movement=m,
                                          numbasis=klmodes, calibrate_flux=True, mode="ADI", highpass=_highpass)
                # flips images
                #shouldn't need to flip anymore
                output = dataset.output
                #output = dataset.output[:, :, :, ::-1]

            # keeps track of number of KL mode values that have been tested, used for indexing
            kcount = 0

            # iterates over kl modes
            for k in klmodes:
                print("KL mode", k)
                if (runKLIP):
                    # takes median combination of cube made with given number of KL modes
                    isolatedKL = np.nanmedian(output[kcount, :, :, :], axis=0)