Example #1
def background_correct(raw_tpf):
    bkg = _estimate_background(raw_tpf)
    hdu = deepcopy(raw_tpf.hdu)
    hdu[1].data['FLUX'] -= np.atleast_3d(bkg).transpose([1, 2, 0])
    hdu[1].data['FLUX'] -= np.min(hdu[1].data['FLUX'])
    fits.HDUList(hdus=list(hdu)).writeto('hack.fits', overwrite=True)
    tpf = lk.TessTargetPixelFile('hack.fits')
    os.remove('hack.fits')
    return tpf
Example #2
def background_correct(raw_tpf):
    ''' Correct the background in a TPF

    Parameters
    ----------
    raw_tpf : lk.TessTargetPixelFile
        Input TPF

    Returns
    -------
    tpf : lk.TessTargetPixelFile
        TPF with background removed
    '''
    bkg = _estimate_background(raw_tpf)
    hdu = deepcopy(raw_tpf.hdu)
    hdu[1].data['FLUX'][raw_tpf.quality_mask] -= np.atleast_3d(bkg).transpose([1, 2, 0])
    hdu[1].data['FLUX'][raw_tpf.quality_mask] -= np.min(hdu[1].data['FLUX'])
    fits.HDUList(hdus=list(hdu)).writeto('hack.fits', overwrite=True)
    tpf = lk.TessTargetPixelFile('hack.fits', quality_bitmask=raw_tpf.quality_bitmask)
    os.remove('hack.fits')
    return tpf
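Both versions assume that `_estimate_background` and the module-level imports (`lightkurve as lk`, `numpy as np`, `astropy.io.fits as fits`, `copy.deepcopy`, `os`) are available. A minimal usage sketch, with a purely illustrative target and sector:

import lightkurve as lk

# Hypothetical target and sector, for illustration only.
search = lk.search_targetpixelfile("TIC 261136679", mission="TESS", sector=1)
raw_tpf = search.download()

tpf = background_correct(raw_tpf)   # background-subtracted TPF
tpf.to_lightcurve().plot()          # inspect the corrected light curve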
Example #3
def Catalog_scene(Ra,
                  Dec,
                  Size,
                  Maglim=19,
                  Catalog='unified',
                  Local=None,
                  Sector=None,
                  Bkg_limit=20.5,
                  Zeropoint=20.44,
                  Scale=100,
                  Interpolate=False,
                  FFT=False,
                  PRF=True,
                  Plot=False,
                  Save=None):
    """
	Create a simulated TESS image using Gaia sources. 

	-------
	Inputs-
	-------
		Ra 				float 	RA of image centre 
		DEC 			float 	Dec of image centre 
		Size 			int 	Size of the TESS image in pixels
		Maglim 			float 	Magnitude limit for Gaia sources
		Bkg_lim 		float 	TESS limiting magnitude 
		Zeropoint 		float 	TESS magnitude zeropoint 
		Scale 			int 	Interpolation scale size 
	
	--------
	Options-
	--------
		Interpolate 	bool	Interpolate the TESS PRF to a scale specified by parameter 'Scale'
		Plot 			bool 	Plot the complete scene and TESS image 
		Save 			str 	Save path for figure

	-------
	Output-
	-------
		soures 			array 	Array of simulated TESS images for each Gaia sourcetes

	"""
    if Local is None:
        tpf = Get_TESS(Ra, Dec, Size, Sector=Sector)
    elif isinstance(Local, str):
        tpf = lk.TessTargetPixelFile(Local)
    elif isinstance(Local, lk.targetpixelfile.TessTargetPixelFile):
        tpf = Local
    # pos returned as column row
    if Catalog == 'gaia':
        pos, Tmag = Get_Gaia(tpf, magnitude_limit=Maglim)
    if Catalog == 'ps1':
        pos, Tmag = Get_PS1(tpf, magnitude_limit=Maglim)
    if Catalog == 'unified':
        result = Unified_catalog(tpf, magnitude_limit=Maglim)
        col = result.col.values
        row = result.row.values
        Tmag = result.tmag.values
    else:
        col = pos[:, 0]
        row = pos[:, 1]
        result = [pos, Tmag]

    syndiff = {}
    syndiff['catalog'] = result
    syndiff['tpf'] = tpf

    tcounts = 10**(-2 / 5 * (Tmag - Zeropoint))
    bkg = 10**(-2 / 5 * (Bkg_limit - Zeropoint))

    print(len(tcounts))
    sources = np.zeros((len(col), tpf.shape[1], tpf.shape[2]))  #+ bkg
    for i in range(len(col)):
        if Interpolate:
            template = np.zeros(
                ((tpf.shape[1] + 20) * Scale, (tpf.shape[2] + 20) * Scale))
            #print('template shape ',template.shape)
            offset1 = int(10 * Scale)
            offset2 = int(10 * Scale)
            #print(np.nansum(template))
            kernal = Interp_PRF(row[i] + tpf.row, col[i] + tpf.column,
                                tpf.camera, tpf.ccd, Scale)
            #print(np.nansum(kernal))
            if FFT:
                template[int(row[i] * Scale + offset1),
                         int(col[i] * Scale + offset2)] = tcounts[i]
                template = signal.fftconvolve(template, kernal, mode='same')
            else:
                optics = kernal * tcounts[i]
                r = int(row[i] * Scale + offset1)
                c = int(col[i] * Scale + offset2)
                template = Add_convolved_sources(r, c, optics, template)
            #print(np.nansum(template))
            template = template[offset1:int(offset1 + tpf.shape[1] * Scale),
                                offset2:int(offset2 + tpf.shape[2] * Scale)]
            #print('template shape ',template.shape)
            #print(np.nansum(template))
            sources[i] = Downsample(
                template, Scale, pix_response=True
            )  #block_reduce(template,block_size=(Scale,Scale),func=np.nansum)

        else:
            template = np.zeros(((20 + tpf.shape[1]), (20 + tpf.shape[2])))
            offset1 = int(10)
            offset2 = int(10)
            if PRF:
                kernal = Get_PRF(row[i] + tpf.row, col[i] + tpf.column,
                                 tpf.camera, tpf.ccd)
                kernal = kernal / np.nansum(kernal)
                #print(template.shape)

                if FFT:
                    template[int(row[i] + offset1),
                             int(col[i] + offset2)] = tcounts[i]
                    template = signal.fftconvolve(template,
                                                  kernal,
                                                  mode='same')
                else:
                    optics = kernal * tcounts[i]
                    r = int(row[i] + offset1)
                    c = int(col[i] + offset2)
                    template = Add_convolved_sources(r, c, optics, template)
            else:
                template[int(row[i] + offset1),
                         int(col[i] + offset2)] = tcounts[i]
            template = template[offset1:int(offset1 + tpf.shape[1]),
                                offset2:int(offset2 + tpf.shape[2])]

            sources[i] += template
    syndiff['sources'] = sources

    if Plot:
        scene = np.nansum(sources, axis=0)
        #gaia = rotate(np.flipud(gaia*10),-90)
        plt.figure(figsize=(8, 4))
        plt.subplot(1, 2, 1)
        plt.title('{} scene'.format(Catalog))
        norm = ImageNormalize(vmin=np.nanmin(scene),
                              vmax=np.nanmax(scene),
                              stretch=SqrtStretch())
        im = plt.imshow(scene, origin='lower', norm=norm)
        plt.xlim(-0.5, Size - 0.5)
        plt.ylim(-0.5, Size - 0.5)
        plt.plot(col - .5, row - .5, 'r.', alpha=0.5)
        ax = plt.gca()
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(im, cax=cax)

        plt.subplot(1, 2, 2)
        plt.title('TESS image')
        tess = np.nanmedian(tpf.flux - 96, axis=0)
        norm = ImageNormalize(vmin=np.nanmin(tess),
                              vmax=np.nanmax(tess),
                              stretch=SqrtStretch())
        im = plt.imshow(tess, origin='lower', norm=norm)
        plt.xlim(-0.5, Size - 0.5)
        plt.ylim(-0.5, Size - 0.5)
        plt.plot(col - .5, row - .5, 'r.', alpha=0.5)
        ax = plt.gca()
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(im, cax=cax)

        plt.tight_layout()
        if Save is not None:
            plt.savefig(Save)  # save before show so the saved figure is not blank
        plt.show()

    return syndiff
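A hypothetical call, assuming the helper functions referenced above (`Get_TESS`, `Get_Gaia`, `Get_PS1`, `Unified_catalog`, `Get_PRF`, `Interp_PRF`, `Add_convolved_sources`, `Downsample`) are importable from the same module; the coordinates and image size are illustrative only:

import numpy as np

# Simulate a 20x20 pixel scene around an arbitrary sky position using the
# unified catalogue and the TESS PRF, without sub-pixel interpolation.
syndiff = Catalog_scene(Ra=84.29, Dec=-80.47, Size=20,
                        Maglim=18, Catalog='unified',
                        Interpolate=False, PRF=True, Plot=False)

scene = np.nansum(syndiff['sources'], axis=0)  # combined simulated image
print(scene.shape)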
Example #4
    def get_tpf(self, sector=None, quality_bitmask=None, return_df=False):
        """Download tpf from MAST given coordinates
           though using TIC id yields unique match.

        Parameters
        ----------
        sector : int
            TESS sector
        fitsoutdir : str
            fits output directory

        Returns
        -------
        tpf and/or df: lk.targetpixelfile, pd.DataFrame

        Note: find a way to compress the logic below
        if tpf is None:
            - download_tpf
        else:
            if tpf.sector==sector
                - load tpf
        else:
            - download_tpf
        """
        sector = sector if sector else self.sector
        quality_bitmask = (quality_bitmask
                           if quality_bitmask else self.quality_bitmask)
        if self.tpf is None:
            if self.ticid is not None:
                # search by TICID
                ticstr = f"TIC {self.ticid}"
                if self.verbose:
                    print(f"\nSearching TPF in MAST for {ticstr}.\n")
                res = lk.search_targetpixelfile(ticstr,
                                                mission=MISSION,
                                                sector=None)
            else:
                # search by position
                if self.verbose:
                    print(
                        f"\nSearching TPF in MAST for ra,dec=({self.target_coord.to_string()}).\n"
                    )
                res = lk.search_targetpixelfile(
                    self.target_coord,
                    mission=MISSION,
                    sector=None,  # search all if sector=None
                )
            assert res is not None, "No results from lightkurve search."
        else:
            # if self.tpf.sector == sector:
            #     # reload from memory
            #     tpf = self.tpf
            # else:
            if self.verbose:
                print("Searching targetpixelfile using lightkurve.")
            if self.ticid:
                ticstr = f"TIC {self.ticid}"
                if self.verbose:
                    print(f"\nSearching mast for {ticstr}.\n")
                res = lk.search_targetpixelfile(ticstr,
                                                mission=MISSION,
                                                sector=None)
            else:
                if self.verbose:
                    print(
                        f"\nSearching mast for ra,dec=({self.target_coord.to_string()}).\n"
                    )
                res = lk.search_targetpixelfile(
                    self.target_coord,
                    mission=MISSION,
                    sector=None,  # search all if sector=None
                )
            assert res is not None, "No results from lightkurve search."
        df = res.table.to_pandas()

        if len(df) > 0:
            all_sectors = [int(i) for i in df["sequence_number"].values]
            if sector:
                sector_idx = df["sequence_number"][df["sequence_number"].isin(
                    [sector])].index.tolist()
                if len(sector_idx) == 0:
                    raise ValueError(
                        "sector {} data is unavailable".format(sector))
                obsid = df.iloc[sector_idx]["obs_id"].values[0]
                # ticid = int(df.iloc[sector_idx]["target_name"].values[0])
                fitsfilename = df.iloc[sector_idx]["productFilename"].values[0]
            else:
                sector_idx = 0
                sector = int(df.iloc[sector_idx]["sequence_number"])
                obsid = df.iloc[sector_idx]["obs_id"]
                # ticid = int(df.iloc[sector_idx]["target_name"])
                fitsfilename = df.iloc[sector_idx]["productFilename"]

            msg = f"{len(df)} tpf(s) found in sector(s) {all_sectors}.\n"
            msg += f"Using data from sector {sector} only.\n"
            if self.verbose:
                logging.info(msg)
                print(msg)

            filepath = join(fitsoutdir, "mastDownload/TESS", obsid,
                            fitsfilename)
            if not exists(filepath) or self.clobber:
                if self.verbose:
                    print(f"Downloading TIC {self.ticid} ...\n")
                ticstr = f"TIC {self.ticid}"
                res = lk.search_targetpixelfile(ticstr,
                                                mission=MISSION,
                                                sector=sector)
                tpf = res.download(quality_bitmask=quality_bitmask,
                                   download_dir=fitsoutdir)
            else:
                if self.verbose:
                    print("Loading TIC {} from {}/\n".format(
                        self.ticid, fitsoutdir))
                tpf = lk.TessTargetPixelFile(filepath)
            if self.apply_data_quality_mask:
                tpf = remove_bad_data(tpf, sector=sector, verbose=self.verbose)
            self.tpf = tpf
            if return_df:
                return tpf, df
            else:
                return tpf
        else:
            msg = "No tpf file found! Check FFI data using --cadence=long\n"
            logging.info(msg)
            raise FileNotFoundError(msg)
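`get_tpf` is a method of a larger class (not shown) that provides `ticid`, `target_coord`, `sector`, `quality_bitmask`, `verbose`, `clobber`, `apply_data_quality_mask`, and `tpf` attributes, with `MISSION`, `fitsoutdir`, and `remove_bad_data` assumed to be defined at module level. A hedged sketch of how it might be called, using a hypothetical class name `Target`:

# Hypothetical usage; `Target` is a stand-in for the enclosing class and its
# constructor signature is assumed, not taken from the source.
t = Target(ticid=261136679, sector=1, quality_bitmask="default", verbose=True)
tpf, df = t.get_tpf(sector=1, return_df=True)
print(df[["sequence_number", "obs_id", "productFilename"]].head())
print(tpf.sector, tpf.targetid)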
Example #5
def extract_light_curve(fits_filename, outputdir, return_msg=True):

    # Output name
    output = Path(fits_filename.stem + '_corrected.pickled')
    output = outputdir / output

    # Parameters and  Criteria:
    sigma_clipping = 5  # To be applied after the detrending of the light curve

    # Structure the data to be saved
    results = {
        'tic': None,
        'sector': None,
        'ra': None,
        'dec': None,
        'headers': None,  # Headers from the original FITS file
        'fit': None,  # Result from fit
        'neighbours_all': None,  # All neighbours stars info
        'neighbours_used': None,  # Used neighbours stars info
        'target': None,  # Target star info
        'aperture_threshold': None,  # HDU to store tabular information
        'pca_all': None,  # PCA results
        'pca_used': None,  # PCA results
        'centroids': None,  # Centroids results
        'excluded_intervals': None,  # Excluded intervals in days
        'lc_raw': None,  # Light curves
        'lc_raw_nonan': None,  # Light curves
        'lc_trend': None,  # Light curves
        'lc_regressed': None,  # Light curves
        'lc_regressed_notoutlier': None,  # Light curves
        'median_image': None,
        'masks': None,
        'tag': None
    }

    # Save information from header from original FITS file
    HDUL, ext = [], 0
    while True:
        try:
            HDUL.append(
                fits.getheader(fits_filename.as_posix(), ext=ext).tostring())
            ext += 1
        except IndexError:
            break
        except Exception as e:
            print('Unexpected exception when reading headers from FITS: ', e)
            break
    results['headers'] = HDUL

    # Load the TESS target pixel file
    try:
        tpf = lk.TessTargetPixelFile(fits_filename.as_posix())
    except Exception as e:
        # Save results
        err_msg = f'"lightkurve.TessTargetPixelFile()" could not open file {fits_filename.as_posix()}. Exception: {e}'
        results['tag'] = err_msg
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return err_msg
        return
    tic = str(tpf.get_keyword('ticid'))
    sector = str(tpf.get_keyword('sector'))
    target_ra = float(tpf.ra)
    target_dec = float(tpf.dec)
    # Store to results
    results['tic'], results['sector'] = tic, sector
    results['ra'], results['dec'] = target_ra, target_dec

    # Initialize messages
    id_msg = f'TIC {tic} Sector {sector}: Skipped: '
    OK_msg = f'TIC {tic} Sector {sector}: OK'

    # Calculate the median image
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        median_image = np.nanmedian(tpf.flux, axis=0)
        # Store to results
        results['median_image'] = median_image

    # Estimate of aperture mask and background mask
    ap_mask_threshold, bkg_mask_threshold = 5, 3
    ap_mask = threshold_mask(median_image,
                             threshold=ap_mask_threshold,
                             reference_pixel='center')
    ap_bkg = ~threshold_mask(
        median_image, threshold=bkg_mask_threshold, reference_pixel=None)
    # Exclude NaN values outside the camera
    ap_bkg &= ~np.isnan(median_image)
    # Estimate the median flux background
    median_bkg_flux = np.median(median_image[ap_bkg])
    # Store to results
    results['masks'] = {'aperture':ap_mask,\
                        'background':ap_bkg}

    # Check validity of aperture mask
    OK_ap_mask, err_msg = check_aperture_mask(ap_mask, id_msg)
    # If aperture is not good, exit program with corresponding message
    if not OK_ap_mask:
        # Save results
        results['tag'] = err_msg
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return err_msg
        return

    # Refine aperture
    try:
        WCS = tpf.wcs
    except IndexError:
        # Save results
        err_msg = id_msg + 'No WCS info in header'
        results['tag'] = err_msg
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return err_msg
        return
    ap_mask,\
    target_coord_pixel, target_tmag,\
    nb_coords_pixel, nb_tmags,\
    err_msg = refine_aperture(results, tic, target_ra, target_dec, WCS,\
                  ap_mask, ap_mask_threshold, median_image, prepend_err_msg=id_msg)
    # If not satisfactory aperture mask
    if ap_mask is None:
        # Save results
        results['tag'] = err_msg
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return err_msg
        return

    # Variation in time of aperture's center of mass
    centroid_col, centroid_row = tpf.estimate_centroids(aperture_mask=ap_mask,
                                                        method='quadratic')
    centroid_col -= tpf.column
    centroid_row -= tpf.row
    sqrt_col2_row2 = np.sqrt(centroid_col**2 + centroid_row**2)
    # Store to results
    results['centroids'] = {'col':centroid_col,\
                            'row':centroid_row,\
                            'sqrt_col2_row2':sqrt_col2_row2,\
                            'time':tpf.time}

    # Fit the image and find the contamination fraction within the aperture mask
    fitted_image, err_msg = contamination(results, median_image,ap_mask,\
                                 target_coord_pixel, target_tmag,\
                                 nb_coords_pixel, nb_tmags,\
                                 tpf.wcs,median_bkg_flux,
                                 prepend_err_msg=id_msg)
    if fitted_image is None:
        # Save results
        results['tag'] = err_msg
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return err_msg
        return

    # Generate the raw light curve
    lc_raw = tpf.to_lightcurve(aperture_mask=ap_mask, method='aperture')
    # Store to results
    results['lc_raw'] = {'flux':lc_raw.flux,\
                         'time':lc_raw.time}

    # Find the indices of the quality mask that created the light curve
    ind = np.argwhere(tpf.quality_mask)
    # Mask (True) the light-curve cadences with null or NaN flux
    mask = lc_raw.flux == 0
    mask |= np.isnan(lc_raw.flux)
    # Set to False the indices to be masked (ignored).
    # Note that we take a subset from `ind` because the masks were defined from the light curve
    tpf.quality_mask[ind[mask]] = False

    # Exclude intervals previously decided
    exclude_interval(tpf, sector, results)

    # Generate the light curve
    lc = tpf.to_lightcurve(aperture_mask=ap_mask, method='aperture')
    # Store to results
    results['lc_raw_nonan'] = {'flux':lc.flux,\
                               'time':lc.time}

    # Make a design matrix and pass it to a linear regression corrector
    regressors = tpf.flux[:, ap_bkg]

    # Number of PCs to use
    npc, dm, rc = find_number_of_PCs(results, regressors, lc)

    if npc == 0:
        # Save results
        err_msg = id_msg + 'No PCs used, no detrending done.'
        results['tag'] = err_msg
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return err_msg
        return

    try:
        # Detrend light curve using PCA
        dm = lk.DesignMatrix(regressors,
                             name='regressors').pca(npc).append_constant()
        rc = lk.RegressionCorrector(lc)
        lc_regressed = rc.correct(dm)
        lc_trend = rc.diagnostic_lightcurves['regressors']

        # Sigma clipping to remove outliers
        lc_regressed_no_outliers, lc_mask_regressed_outliers = lc_regressed.remove_outliers(
            return_mask=True, sigma=sigma_clipping)

        # Store to results
        results['lc_trend']                = {'flux':lc_trend.flux,\
                                              'time':lc_trend.time}
        results['lc_regressed']            = {'flux':lc_regressed.flux,\
                                              'time':lc_regressed.time,\
                                              'outlier_mask':lc_mask_regressed_outliers,\
                                              'sigma_clipping':sigma_clipping}
        results['lc_regressed_notoutlier'] = {'flux':lc_regressed_no_outliers.flux,\
                                              'time':lc_regressed_no_outliers.time}
        results['pca_used']                = {'coef':rc.coefficients,\
                                              'pc':[dm.values[:,i] for i in range(dm.rank)],\
                                              'dm':dm,\
                                              'rc':rc,\
                                              'npc':npc}

        # Save results
        results['tag'] = 'OK'
        picklefile = open(output.as_posix(), 'wb')
        pickle.dump(results, picklefile)
        picklefile.close()
        if return_msg: return OK_msg
        return

    except Exception as e:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!')
        print(f'   Sector {sector}.')
        print('EXCEPTION:', e)
        print('!!!!!!!!!!!!!!!!!!!!!!!!!')
        if return_msg: return id_msg + '::' + repr(e) + '::' + str(e)
        return
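A hypothetical driver for the function above, assuming the helpers it calls (`threshold_mask`, `check_aperture_mask`, `refine_aperture`, `contamination`, `exclude_interval`, `find_number_of_PCs`) and the module imports (`lightkurve as lk`, `numpy as np`, `astropy.io.fits`, `pickle`, `warnings`, `pathlib.Path`) are available; the FITS file name is illustrative only:

from pathlib import Path
import pickle

outdir = Path('corrected_lcs')
outdir.mkdir(exist_ok=True)

# Process a single TPF file and report the status message.
fits_file = Path('example_tess_targetpixelfile.fits')  # illustrative name
msg = extract_light_curve(fits_file, outdir, return_msg=True)
print(msg)

# The pickled results dictionary can be read back like this:
with open(outdir / (fits_file.stem + '_corrected.pickled'), 'rb') as fh:
    results = pickle.load(fh)
print(results['tag'])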
Example #6
def plotTessLightcurve(tic, downloadDir='/epyc/data/tess',
                       sectorList=[1, 2, 3, 4, 5, 6, 7], offset=0,
                       saveFits=False, savePlot=False, plotFile=None):
    """Plotting function for TESS short-cadence light curves.

    Parameters
    ----------
    tic : `int`, TESS Input Catalogue ID number
    downloadDir : `str`, location where target pixel files exist
                   and/or should be saved, optional
    sectorList :  `list`, all observing sectors to use when downloading,
                   plotting, and saving TESS data
    offset : `float`, amount by which to offset normalized flux median from 1
              on the y-axis in plots. Useful if you are looping over
              plotTessLightcurve multiple times and want to plot several
              light curves on the same plot but not have them entirely overlap
    saveFits : `boolean`, if True save FITS lightcurve file to working directory
                if False do not save any output files
    savePlot : `boolean`, if True and plotFile is defined, save figure to disk
               if False or plotFile is None, display figure instead
    plotFile : `str`, required to save figure to disk when savePlot is True

    Example
    -------
    Suggested use:
        import os
        import matplotlib.pyplot as plt
        import lightkurve as lk
        from glob import glob
        from tessLightcurvePlotter import plotTessLightcurve
        plt.figure()
        # define your own ticList here, e.g., ticList = [1234567, 2345678]
        for idx, tic in enumerate(ticList):
            plotTessLightcurve(tic, offset=0.05*idx)
        plt.show()
    """
    downloadDir = os.path.normpath(downloadDir)
    tpf = None
    lc = None
    for sector in sectorList:
        tpf = None  # reset so a missing sector does not reuse the previous sector's TPF
        sectorStr = "%.3d" % sector
        if 'epyc' in downloadDir:  # files are in sector subdirectories
            filePath = glob(downloadDir + '/sector' + sectorStr + '/tess*s0' +
                            sectorStr + '*' + str(tic) + '*/*.fits')
        else:  # files are all in one directory (lightkurve default)
            filePath = glob(downloadDir + '/tess*s0' + sectorStr + '*' + str(tic) + '*/*.fits')
        if len(filePath) > 0:  # the file is on disk
            filePath.append(filePath[0])  # hack to avoid fits file opening failure
            tpf = lk.TessTargetPixelFile(filePath[0])
        else:  # the file isn't on disk
            searchResult = lk.search_targetpixelfile(tic, sector=sector)
            if len(searchResult) > 0:  # nonzero search results
                print('Downloading sector {0} for star {1}'.format(sector, tic))
                tpf = searchResult.download()
        if tpf and not lc:
            lc = tpf.to_lightcurve().normalize()
        elif tpf and lc:
            newlc = tpf.to_lightcurve().normalize()
            lc = lc.append(newlc)
        else:
            pass  # there is no tpf for this target + sector
    if lc:
        plt.plot(lc.time, lc.flux - offset, label=tic, marker='.', ls='None', alpha=0.2, mec='None')
        if saveFits:
            lc.to_fits(path=str(tic)+'Norm.fits', overwrite=True)
    else:
        print(tic, 'No LC found for any sector')
    plt.xlabel('Time (days)')
    plt.ylabel('Normalized flux')
    plt.legend(frameon=False)
    plt.gca().set_ylim(0.5, 1.1)
    if savePlot and plotFile:
        plt.savefig(plotFile)
Example #7
def load(coord,
         diffim=False,
         out_dir=os.path.join(os.path.expanduser('~'),
                              'tequila_shots/output')):
    """
    Load tequila_shots output file for previous pipeline run.
    WARNING: This code is not fully validated!
    Args:
        coord (SkyCoord): Astropy SkyCoord of target
            (REQUIRED)
        diffim (bool): Load difference imaging?
            (default is False)
        out_dir (str): Work directory path
            (default is '~/tequila_shots/output')
    Returns:
        out_dict: Output dictionary
            Use `out_dict.keys()` to get keywords.
    """

    # Get coordinate name
    coord_name = 'J{0}{1}'.format(
        coord.ra.to_string(unit=u.hourangle, sep='', precision=2, pad=True),
        coord.dec.to_string(sep='', precision=2, alwayssign=True, pad=True))

    # Get the directory of the target
    coord_dir = os.path.join(out_dir, coord_name)

    if not os.path.exists(coord_dir):
        print('Coord directory does not exist!')
        return None

    print('Files in object directory %s:' % coord_dir)
    f_list = os.listdir(coord_dir)

    if len(f_list) == 0:
        print('Coord directory is empty!')
        return None

    [print(f) for f in f_list]
    #TODO/WARNING: sort by sector!!
    print('WARNING: Sectors may be jumbled!!')

    f_list = [os.path.join(coord_dir, f) for f in f_list]

    # Plot image and get TPFs
    out_dict = {}
    out_dict['lc_target'] = []  # Target light curve array of all sectors
    out_dict['lc_target_bkg'] = []  # Background light curve array of all sectors
    out_dict['lc_star'] = []  # Star light curve array of all sectors
    out_dict['aper_target_list'] = []  # List of target aperture files for each sector
    out_dict['aper_star_list'] = []  # List of star aperture files for each sector
    out_dict['ref_flux_list'] = []  # List of reference flux images for each sector
    out_dict['tpf_list'] = []  # List of target pixel files for each sector
    out_dict['tpf_diff_list'] = []  # List of difference target pixel files for each sector
    out_dict['coord_dir'] = coord_dir  # Target output directory
    out_dict['wcs_ref'] = []  # Reference WCS

    # Populate dict
    for f in f_list:
        if '_panel_' in f:
            img = mpimg.imread(f)
            imgplot = plt.imshow(img)
            plt.gca().axis('off')
            plt.tight_layout()
            plt.show()
        elif 'ref_flux_' in f:
            out_dict['ref_flux_list'].append(fits.open(f)[0].data)
        elif 'lc_target_bkg_' in f:
            out_dict['lc_target_bkg'].append(
                lk.lightcurvefile.LightCurveFile(f))
        elif 'lc_target_' in f:
            out_dict['lc_target'].append(lk.lightcurvefile.LightCurveFile(f))
        elif 'lc_star_' in f:
            out_dict['lc_star'].append(lk.lightcurvefile.LightCurveFile(f))
        # Load TPFs
        if diffim:
            if 'tpf_diff_' in f:
                tpf = lk.TessTargetPixelFile(f)
                out_dict['tpf_list'].append(tpf)
                if out_dict['wcs_ref'] == []:
                    out_dict['wcs_ref'] = tpf.wcs
            elif 'tpfdiff_' in f:
                tpf = lk.TessTargetPixelFile(f)
                out_dict['tpf_diff_list'].append(tpf)
        else:
            if 'tpf_' in f:
                tpf = lk.TessTargetPixelFile(f)
                out_dict['tpf_list'].append(tpf)
                if out_dict['wcs_ref'] == []:
                    out_dict['wcs_ref'] = tpf.wcs

    print('Done loading.')

    return out_dict
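A hypothetical call to `load`, assuming the module imports used above (`os`, `astropy.units as u`, `astropy.io.fits`, `matplotlib.image as mpimg`, `matplotlib.pyplot as plt`, `lightkurve as lk`) are in place; the coordinates are illustrative only:

from astropy.coordinates import SkyCoord
import astropy.units as u

# Load a previous tequila_shots run for an arbitrary target position.
coord = SkyCoord(ra=150.0 * u.deg, dec=-30.0 * u.deg)
out_dict = load(coord, diffim=False)
if out_dict is not None:
    print(out_dict.keys())
    for tpf in out_dict['tpf_list']:
        print(tpf.sector, tpf.shape)  # one TPF per stored sector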