Example 1
def get_bv(mag, cbv, cbv2=None, cbv3=None):
    idx = np.isfinite(mag)

    if not np.sum(idx):
        return 0

    if cbv2 is not None and cbv3 is not None:
        for _ in range(1):  # a single refit pass
            C = least_squares(lstsq_fn, [np.median(mag[idx]), 0.5],
                              args=(mag[idx], cbv[idx], cbv2[idx], cbv3[idx]),
                              verbose=0)
            mag1 = C.x[0] + cbv * C.x[1] + cbv2 * C.x[1]**2 + cbv3 * C.x[1]**3
            idx = np.abs(mag - mag1) < 3.0 * mad_std((mag - mag1)[idx])

        return C.x[1]
    else:
        X = np.vstack([np.ones_like(mag), cbv]).T
        Y = mag

        idx = np.ones_like(Y, dtype=bool)
        for i in range(3):
            C = np.linalg.lstsq(X[idx], Y[idx], rcond=None)[0]
            YY = np.sum(X * C, axis=1)
            idx = np.abs(Y - YY) < 3.0 * mad_std(Y - YY)

        return -C[1]
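A minimal, self-contained sketch of the clipping pattern used in the else branch above, on synthetic data (the coefficients and noise level are made up for illustration):

import numpy as np
from astropy.stats import mad_std

rng = np.random.default_rng(0)
cbv = rng.normal(size=500)
mag = 12.0 - 0.3 * cbv + rng.normal(scale=0.01, size=500)
mag[::50] += 1.0  # inject a few outliers

X = np.vstack([np.ones_like(mag), cbv]).T
idx = np.ones(mag.size, dtype=bool)
for _ in range(3):
    # fit only the current inliers, then re-select inliers at 3 robust sigma
    C = np.linalg.lstsq(X[idx], mag[idx], rcond=None)[0]
    resid = mag - X @ C
    idx = np.abs(resid) < 3.0 * mad_std(resid)
print(C)  # approximately [12.0, -0.3], unaffected by the outliers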
Example 2
    def sigma_clip_centroid(self, sigma=3.5, plot=False):
        """
        Sigma-clip the light curve on centroid position (update mask).

        Parameters
        ----------
        sigma : float
            Factor of standard deviations away from the median centroid position
            to clip on.
        plot : bool
            Plot the accepted centroids (in black) and the centroids of the
            rejected fluxes (in red).
        """
        x_mean = np.median(self.centroid_x)
        y_mean = np.median(self.centroid_y)
        x_std = mad_std(self.centroid_x)
        y_std = mad_std(self.centroid_y)

        outliers = (sigma * min([x_std, y_std]) <
                    np.hypot(self.centroid_x - x_mean,
                             self.centroid_y - y_mean))

        if plot:
            plt.scatter(self.centroid_x[~outliers], self.centroid_y[~outliers],
                        marker=',', color='k')
            plt.scatter(self.centroid_x[outliers], self.centroid_y[outliers],
                        marker='.', color='r')
            plt.xlabel('Centroid x')
            plt.ylabel('Centroid y')

        self.mask |= outliers
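The same radial cut is easy to reproduce outside the class; a small sketch with synthetic centroids (all values here are illustrative):

import numpy as np
from astropy.stats import mad_std

rng = np.random.default_rng(1)
cx = rng.normal(10.0, 0.05, 1000)
cy = rng.normal(20.0, 0.05, 1000)
cx[:5] += 1.0  # a handful of displaced centroids

r = np.hypot(cx - np.median(cx), cy - np.median(cy))
outliers = r > 3.5 * min(mad_std(cx), mad_std(cy))
print(outliers.sum())  # ~5: the displaced points, plus the odd chance straggler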
Example 3
    def _flag_pixels(self):
        #setup masked array for inversion
        mstack = np.nanmedian(self.normstack, axis=1)
        dev = np.clip(mad_std(self.normstack, axis=1, ignore_nan=True), 1e-10,
                      None)
        self.sigma_mask = (np.fabs(self.normstack - mstack[:, np.newaxis]) /
                           dev[:, np.newaxis] > 6.)

        #iteratively clean pixels for reconstruction
        for ii in range(5):
            old_mask = np.copy(self.sigma_mask)
            tarray = np.copy(self.normstack)
            tarray[self.sigma_mask] = np.nan

            mstack = np.nanmedian(tarray, axis=1)
            dev = np.clip(mad_std(tarray, axis=1, ignore_nan=True), 1e-10,
                          None)
            self.sigma_mask = (
                np.fabs(self.normstack - mstack[:, np.newaxis]) /
                dev[:, np.newaxis] > 6.)
            if np.all(self.sigma_mask == old_mask):
                break

        logger.info('Flagged %d pixels to mask during reconstruction',
                    np.count_nonzero(self.sigma_mask))
Example 4
    def __call__(self, rho: float = 2., niter: int = 5):
        logger.info("Running Celerite Matern 3/2 detrending")

        time = self.ts.time.copy()
        flux = self.ts.flux.copy()
        mask = ones(time.size, bool)

        for i in range(niter):
            time_learn = time[mask]
            flux_learn = flux[mask]

            wn = mad_std(diff(flux_learn)) / sqrt(2)
            log_sigma = log(mad_std(flux_learn))
            log_rho = log(rho)

            kernel = Matern32Term(log_sigma, log_rho)
            gp = GP(kernel, mean=1.0)
            gp.compute(time_learn, yerr=wn)
            self.prediction = gp.predict(flux_learn, time, return_cov=False)
            mask &= ~sigma_clip(flux - self.prediction, sigma=3).mask

        residuals = flux - self.prediction
        self.mask = m = ~(sigma_clip(residuals, sigma_lower=inf,
                                     sigma_upper=4).mask)
        self.ts._data.update('celerite_m32', time[m],
                             (flux - self.prediction + 1)[m], self.ts.ferr[m])
Example 5
def match_fits(fitsfile1, fitsfile2, header=None, sigma_cut=False,
               use_mad_std=True,
               return_header=False, **kwargs):
    """
    Project one FITS file into another's coordinates
    If sigma_cut is used, will try to find only regions that are significant
    in both images using the standard deviation

    Parameters
    ----------
    fitsfile1: str
        Reference fits file name
    fitsfile2: str
        Offset fits file name
    header: pyfits.Header
        Optional - can pass a header to project both images to
    sigma_cut: bool or int
        Perform a sigma-cut on the returned images at this level
    use_mad_std : bool
        Use mad_std instead of std dev for stddev estimation

    Returns
    -------
    image1,image2,[header] : Two images projected into the same space, and
    optionally the header used to project them
    """

    if header is None:
        header = load_header(fitsfile1)
        image1 = load_data(fitsfile1)
    else: # project image 1 to input header coordinates
        image1 = project_to_header(fitsfile1, header)

    # project image 2 to image 1 coordinates
    image2_projected = project_to_header(fitsfile2, header)

    if image1.shape != image2_projected.shape:
        raise ValueError("Failed to reproject images to same shape.")

    if sigma_cut:
        std1 = stats.mad_std(image1, ignore_nan=True) if use_mad_std else np.nanstd(image1)
        std2 = stats.mad_std(image2_projected, ignore_nan=True) if use_mad_std else np.nanstd(image2_projected)
        corr_image1 = image1*(image1 > (std1*sigma_cut))
        corr_image2 = image2_projected*(image2_projected > (std2*sigma_cut))
        OK = np.isfinite(corr_image1) & np.isfinite(corr_image2)
        if (corr_image1[OK]*corr_image2[OK]).sum() == 0:
            # This state seems to be reached in places when the condition above is False.
            print("Could not use sigma_cut of %f because it excluded all valid data" % sigma_cut)
            corr_image1 = image1
            corr_image2 = image2_projected
    else:
        corr_image1 = image1
        corr_image2 = image2_projected

    returns = corr_image1, corr_image2
    if return_header:
        returns = returns + (header,)
    return returns
Example 6
def sigma_rob(data, iterations=1, thresh=3.0, axis=None):
    """
    Iterative m.a.d. based sigma with positive outlier rejection.
    """
    noise = mad_std(data, axis=axis)
    for _ in range(iterations):
        ind = (np.abs(data) <= thresh * noise).nonzero()
        noise = mad_std(data[ind], axis=axis)
    return noise
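Usage is straightforward; a quick check on synthetic data with strong positive outliers (numbers are illustrative):

import numpy as np
from astropy.stats import mad_std

data = np.random.default_rng(2).normal(scale=2.0, size=10000)
data[:200] += 50.0  # positive outliers
# mad_std is already fairly robust; one rejection pass tightens it further
print(mad_std(data), sigma_rob(data, iterations=1, thresh=3.0))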
Example 8
def plotresults(testlabels, infer, model_1, model_2):
    std_avg = mad_std(infer - testlabels)
    std_m1 = mad_std(model_1 - testlabels)
    std_m2 = mad_std(model_2 - testlabels)
    bias_av, rms_av = returnscatter(testlabels - infer)
    bias_m1, rms_m1 = returnscatter(testlabels - model_1)
    bias_m2, rms_m2 = returnscatter(testlabels - model_2)
    plt.figure(figsize=(20, 15))
    plt.subplot(221)
    x = np.linspace(1.5, 4.8, 100)
    plt.plot(x, x, c='k', linestyle='dashed')
    plt.scatter(testlabels, infer, facecolors='none', edgecolors='r',
                label='Average', zorder=10)
    plt.scatter(testlabels, model_1, facecolors='none', edgecolors='g',
                label='Model-1')
    plt.scatter(testlabels, model_2, facecolors='none', edgecolors='b',
                label='Model-2')
    plt.xlabel('True')
    plt.ylabel('Inferred')
    plt.legend(loc=2)

    plt.subplot(222)
    plt.plot(x, x, c='k', linestyle='dashed')
    plt.scatter(testlabels, infer, facecolors='none', edgecolors='r',
                label='Average', zorder=10)
    plt.text(1.5, 4.5, s='m.std={}'.format(round(std_avg, 3)))
    plt.text(1.5, 4.2, s='RMS={}'.format(round(rms_av, 2)))
    plt.text(1.5, 3.9, s='Bias={}'.format(round(bias_av, 3)))
    plt.xlabel('True')
    plt.ylabel('Inferred')
    plt.legend(loc=1)

    plt.subplot(223)
    plt.plot(x, x, c='k', linestyle='dashed')
    plt.scatter(testlabels, model_1, facecolors='none', edgecolors='g',
                label='Model-1')
    plt.text(1.5, 4.5, s='m.std={}'.format(round(std_m1, 3)))
    plt.text(1.5, 4.2, s='RMS={}'.format(round(rms_m1, 2)))
    plt.text(1.5, 3.9, s='Bias={}'.format(round(bias_m1, 3)))
    plt.xlabel('True')
    plt.ylabel('Inferred')
    plt.legend(loc=1)

    plt.subplot(224)
    plt.plot(x, x, c='k', linestyle='dashed')
    plt.scatter(testlabels, model_2, facecolors='none', edgecolors='b',
                label='Model-2')
    plt.text(1.5, 4.5, s='m.std={}'.format(round(std_m2, 3)))
    plt.text(1.5, 4.2, s='RMS={}'.format(round(rms_m2, 2)))
    plt.text(1.5, 3.9, s='Bias={}'.format(round(bias_m2, 3)))
    plt.xlabel('True')
    plt.ylabel('Inferred')
    plt.legend(loc=1)

    #plt.suptitle('Logg - 10 neighbours')
    plt.savefig(savedir + 'results.png')  # 'savedir' is assumed to be a module-level path
    # plt.show()
    print('average', std_avg, rms_av, bias_av)
    print('model_1', std_m1, rms_m1, bias_m1)
    print('model_2', std_m2, rms_m2, bias_m2)
Example 10
    def compute_empirical_bg_sigma(self, careful_sky=False):

        if careful_sky:
            if self.segmap is None:
                self.set_segmap()

            # this could go wrong in pathological case that
            # segmap is nonzero for all pixels
            return mad_std(self.image[self.segmap.array == 0])
        else:
            return mad_std(self.image)
Example 11
def do_photometry(hdu, extensions=None, threshold=5, fwhm=2.5):

    if extensions is None:
        extensions = np.arange(1, len(hdu))
    if not isiterable(extensions):
        extensions = (extensions, )

    output = {}
    for ext in extensions:
        header = hdu[ext].header
        data = hdu[ext].data
        image_wcs = WCS(header)

        background = mad_std(data)

        sources = daofind(data, threshold=threshold * background, fwhm=fwhm)
        positions = (sources['xcentroid'], sources['ycentroid'])
        sky_positions = pixel_to_skycoord(*positions, wcs=image_wcs)

        apertures = CircularAperture(positions, r=2.)
        photometry_table = aperture_photometry(data, apertures)
        photometry_table['sky_center'] = sky_positions

        output[str(ext)] = photometry_table

    return output
Example 12
def generate_aperture(fluxes, n=5):
    flsum = np.nansum(fluxes, axis=0)
    thr   = mad_std(flsum)
    thm   = flsum > n*thr

    pos = np.column_stack(np.where(thm))
    db  = DBSCAN(eps=rc, min_samples=nc).fit(pos)  # rc, nc: module-level clustering radius / minimum samples
    #ncl = np.unique(db.labels_).size - (1 if -1 in db.labels_ else 0)

    clpos = np.transpose(pos[db.labels_ != -1])
    cluster = np.zeros(flsum.shape).astype(bool)
    cluster[clpos[0], clpos[1]] = True

    gauss = ndi.gaussian_filter(flsum, 0.5)
    gauss[~cluster] = 0

    nbh      = np.ones((3,3))
    localmax = (ndi.maximum_filter(gauss, footprint=nbh) == gauss) & cluster
    maxloc   = np.column_stack(np.where(localmax))
    markers  = ndi.label(localmax)[0]
    labels   = watershed(-flsum, markers, mask=cluster)
    '''
    nstars   = labels.max()

    starmask = np.zeros((nstars, flsum.shape[0], flsum.shape[1])).astype(bool)
    for i in range(nstars):
        starmask[i][labels == i+1] = True
    '''

    return labels
Example 13
def secEclipse(params):
    per = params.tlsOut.period
    tdur = params.tlsOut.duration
    foldY = params.tlsOut.folded_y
    foldX = params.tlsOut.folded_phase
    depth = params.tlsOut.depth
    fit_b = params.impact

    seDepth = 1 - np.mean(foldY[(foldX < tdur / per / 2) |
                                (foldX > 1 - tdur / per / 2)])
    seDepthsd = mad_std(foldY[(foldX < tdur / per / 2) |
                              (foldX > 1 - tdur / per / 2)])
    if 0.1 * depth < seDepth:
        if fit_b >= .9:
            params.SE_found = True
            params.secEclipseFP = True
        else:
            params.SE_found = True
            params.secEclipseFP = False
    elif seDepth > 3 * seDepthsd:
        params.secEclipseFP = False
        params.SE_found = True

    else:
        params.secEclipseFP = False
        params.SE_found = False
    return params
Example 14
    def fit(self, niter=5):
        """Determine the fit of the model to the data points with rejection

        For each iteration, a weight is calculated based on the distance a source
        is from the relationship, and outliers are rejected.

        Parameters
        ----------
        niter: int
            Number of iterations for the fit

        """
        fitter = md.fitting.LinearLSQFitter()
        weights = np.ones_like(self.x)
        for i in range(niter):
            self.model = fitter(self.model, self.x, self.wavelength, weights=weights)

            # calculate the weights based on the median absolute deviation
            r = (self.wavelength - self.model(self.x))
            s = stats.mad_std(r)
            biweight = lambda x: ((1.0 - x ** 2) ** 2.0) ** 0.5
            if s!=0:
                weights = 1.0/biweight(r / s)
            else:
                weights = np.ones(len(self.x))
Example 15
def save_fitbg(star):
    """
    Saves the results of the `fit_background` module.

    Parameters
    ----------
    star : target.Target
        pipeline target with the results of the `fit_background` routine

    """
    df = pd.DataFrame(star.fitbg['results'][star.name])
    if star.fitbg['convert']:
        df = convert_samples(df, drop=star.fitbg['drop'])
    star.df = df.copy()
    new_df = pd.DataFrame(columns=['parameter', 'value', 'uncertainty'])
    for c, col in enumerate(df.columns.values.tolist()):
        new_df.loc[c, 'parameter'] = col
        new_df.loc[c, 'value'] = df.loc[0,col]
        if star.fitbg['mc_iter'] > 1:
            new_df.loc[c, 'uncertainty'] = mad_std(df[col].values)
        else:
            new_df.loc[c, 'uncertainty'] = '--'
    new_df.to_csv('%sbackground.csv'%star.params[star.name]['path'], index=False)
    if star.fitbg['samples']:
        df.to_csv('%ssamples.csv'%star.params[star.name]['path'], index=False)
Example 16
    def photometry(self, filepath, b, d):
        hdulist = fits.open(filepath, ignore_missing_end=True)
        hdu = hdulist[0]
        image = hdu.data.astype(float)
        image -= np.median(image)
        bkg_sigma = mad_std(image)
        daofind = DAOStarFinder(fwhm=b, threshold=d * bkg_sigma)
        sources = daofind(image)  # stellar sources from the DAOStarFinder algorithm
        for col in sources.colnames:
            sources[col].info.format = '%.8g'  # for consistent table output
        print(sources)
        # Perform aperture photometry
        positions = (sources['xcentroid'], sources['ycentroid'])
        apertures = CircularAperture(positions, r=17.)
        phot_table = aperture_photometry(image, apertures)
        for col in phot_table.colnames:
            phot_table[col].info.format = '%.8g'  # for consistent table output
        filedest1 = input("Where to save the result:")
        filedest2 = input("Where to save the result 2:")
        print(phot_table)
        np.savetxt(filedest1, (sources), delimiter=',')  # save result as CSV
        np.savetxt(filedest2, (phot_table), delimiter=',')  # save result as CSV
        plt.imshow(image, cmap='gray_r', origin='lower')
        apertures.plot(color='blue', lw=1.5, alpha=0.5)
        plt.show()
Example 17
def gaussian_kernel_acf(t,x,x_err,delta_tau,n):
    """ Compute the autocorrelation function for timeseries x(t) at lags
    k*delta_tau for k in 0..n using a gaussian kernel."""
    
    for i in range(5):
        x = (x-np.median(x))/mad_std(x)
        q = np.where(np.abs(x)<3.5)
        x = x[q]
        t = t[q]
        
    nx = len(x)

    x = (x-np.mean(x))/np.std(x)
    
    Ti, Tj = np.meshgrid(t,t)
    Tau = Tj - Ti
    
    Xi, Xj = np.meshgrid(x,x)
    Xj = np.tril(Xj,k=-1)

    acf = np.zeros(n)

    for k in range(n):
        h = k*delta_tau
        b = np.exp(-Tau**2/(2*h**2))/np.sqrt(2*np.pi*h)
        b = np.tril(b,k=-1)
        Xjb = Xj*b
        acf[k] = np.sum([np.dot(x[m]*np.ones(nx),Xjb[:,m]) for m in range(nx)])/np.sum(b)

    return acf
Example 18
    def ts(self, verbose=False, sigma_clip=4):
        '''  Reads in a timeseries from the file stored in data file ONLY.
        All data modification is to be done in the frequency domain.
        '''

        data = np.genfromtxt(self.data_file)
        #print data[0:5]
        data[:,1] = data[np.argsort(data[:,0]),1]  # re-order flux by time
        data[:,0] = data[np.argsort(data[:,0]),0]  # re-order time

        self.time = (data[:,0] - data[0,0]) * 24.0 * 3600.0  # start time at 0 secs
        self.flux = data[:,1]

        self.flux = self.flux[np.argsort(self.time)]
        self.time = self.time[np.argsort(self.time)]

        # remove data gaps
        self.time = self.time[(self.flux != 0) & (np.isfinite(self.flux))]
        self.flux = self.flux[(self.flux != 0) & (np.isfinite(self.flux))]

        self.flux = self.flux[np.argsort(self.time)]
        self.time = self.time[np.argsort(self.time)]

        self.flux_fix = self.flux
        sel = np.where(np.abs(self.flux_fix) < mad_std(self.flux_fix) * sigma_clip)
        self.flux_fix = self.flux_fix[sel]  # remove extreme values
        self.time_fix = self.time[sel]      # remove extreme values

        if verbose:
            print("Read file {}".format(self.data_file))
            print("Data points : {}".format(len(self.time)))
def get_bias(bias, gain=1):
    '''Calculate superbias and readnoise.

    Apply sigma-clipping to all given bias images.
    Calculate readnoise (median robust standard deviation multiplied by gain)
    Get superbias by averaging all bias images.

    Parameters
    ----------
    bias : 3D ndarray
        Array of bias images.
    gain : float, optional
        Electrons per ADU in given bias images (default is 1).

    Returns
    -------
    superbias : 2D ndarray
        Superbias image.
    read_noise : float
        Read noise in the current observations.
    '''
    bias_clean = astats.sigma_clip(bias, sigma=5)
    read_noise = np.median(astats.mad_std(bias, axis=(1, 2))) * gain
    superbias = np.average(bias_clean, axis=0)
    superbias = superbias.filled(superbias.mean())
    return (superbias, read_noise)
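A quick way to exercise get_bias is with a synthetic stack of bias frames (the numbers are chosen arbitrarily; assumes numpy as np and astropy.stats as astats are imported, as the snippet requires):

rng = np.random.default_rng(3)
bias = rng.normal(loc=1000.0, scale=5.0, size=(10, 64, 64))  # 10 synthetic frames
superbias, read_noise = get_bias(bias, gain=1.5)
print(superbias.shape, read_noise)  # (64, 64), roughly 5 * 1.5 = 7.5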
Example 20
def running_MAD_2D(z, w, verbose=False, parallel=False):
    """Computers a running standard deviation of a 2-dimensional array z.
    The stddev is evaluated over the vertical block with width w pixels.
    The output is a 1D array with length equal to the width of z.
    This is very slow on arrays that are wide in x (hundreds of thousands of points)."""
    import astropy.stats as stats
    import numpy as np
    from tayph.vartests import typetest, dimtest, postest
    import tayph.util as ut
    if parallel: from joblib import Parallel, delayed
    typetest(z, np.ndarray, 'z in fun.running_MAD_2D()')
    dimtest(z, [0, 0], 'z in fun.running_MAD_2D()')
    typetest(w, [int, float], 'w in fun.running_MAD_2D()')
    postest(w, 'w in fun.running_MAD_2D()')
    size = np.shape(z)
    ny = size[0]
    nx = size[1]
    s = np.arange(0, nx, dtype=float) * 0.0
    dx1 = int(0.5 * w)
    dx2 = int(int(0.5 * w) + (w % 2))  #To deal with odd windows.
    for i in range(nx):
        minx = max([0, i - dx1])  #This here is only a 3% slowdown.
        maxx = min([nx, i + dx2])
        s[i] = stats.mad_std(
            z[:, minx:maxx],
            ignore_nan=True)  #This is what takes 97% of the time.
        if verbose: ut.statusbar(i, nx)
    return (s)
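The core of the loop above in a few self-contained lines, for readers who want the sliding-window robust noise without the tayph helpers (window width and array size are arbitrary):

import numpy as np
from astropy.stats import mad_std

z = np.random.default_rng(4).normal(size=(50, 400))
w, nx = 40, z.shape[1]
s = np.array([mad_std(z[:, max(0, i - w // 2):min(nx, i + w // 2 + w % 2)],
                      ignore_nan=True) for i in range(nx)])
print(s.shape, float(s.mean()))  # (400,), close to 1.0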
Example 21
def chunk_stats(list_data, chunk_size=15):
    """Cut the datasets in chunks and take the median
    Return the set of medians
    """
    ndatasets = len(list_data)

    nchunk_x = int(list_data[0].shape[0] // chunk_size - 1)
    nchunk_y = int(list_data[0].shape[1] // chunk_size - 1)
    # Check that all datasets have the same size
    med_data = np.zeros((ndatasets, nchunk_x * nchunk_y), dtype=np.float32)
    std_data = np.zeros_like(med_data)

    if not all(d.shape == list_data[0].shape for d in list_data):
        upipe.print_error(
            "Datasets are not of the same size in chunk_stats")
    else:
        for i in range(0, nchunk_x):
            for j in range(0, nchunk_y):
                for k in range(ndatasets):
                    # Taking the median
                    med_data[k, i * nchunk_y + j] = np.median(
                        list_data[k][i * chunk_size:(i + 1) * chunk_size,
                                     j * chunk_size:(j + 1) * chunk_size])
                    std_data[k, i * nchunk_y + j] = mad_std(
                        list_data[k][i * chunk_size:(i + 1) * chunk_size,
                                     j * chunk_size:(j + 1) * chunk_size])

    return med_data, std_data
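Assuming numpy and astropy.stats.mad_std are imported as in the other snippets, a sketch of calling chunk_stats on three same-sized synthetic images:

imgs = [np.random.default_rng(k).normal(loc=k, size=(100, 100)) for k in range(3)]
med, std = chunk_stats(imgs, chunk_size=20)
# 100 // 20 - 1 = 4 chunks per axis, so 16 medians per dataset
print(med.shape, std.shape)  # (3, 16) (3, 16)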
Example 22
def sigma_clip(array,nsigma=3.0,MAD=False):
    """This returns the n-sigma boundaries of an array, mainly used for scaling plots.

    Parameters
    ----------
    array : list, np.ndarray
        The array from which the n-sigma boundaries are required.

    nsigma : int, float
        The number of sigmas away from the median at which to place the boundaries.

    MAD : bool
        Use the true standard deviation or MAD estimator of the standard deviation
        (works better in the presence of outliers).

    Returns
    -------
    vmin,vmax : float
        The bottom and top n-sigma boundaries of the input array.
    """
    from tayph.vartests import typetest
    import numpy as np
    typetest(array,[list,np.ndarray],'array in fun.sigma_clip()')
    typetest(nsigma,[int,float],'nsigma in fun.sigma_clip()')
    typetest(MAD,bool,'MAD in fun.sigma_clip()')
    m = np.nanmedian(array)
    if MAD:
        from astropy.stats import mad_std
        s = mad_std(array,ignore_nan=True)
    else:
        s = np.nanstd(array)
    vmin = m-nsigma*s
    vmax = m+nsigma*s
    return vmin,vmax
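The same bounds can be computed directly, without the tayph type checks; a two-line equivalent on illustrative data:

import numpy as np
from astropy.stats import mad_std

x = np.random.default_rng(5).normal(loc=5.0, size=2000)
x[:20] += 100.0  # outliers barely widen MAD-based bounds
m, s = np.nanmedian(x), mad_std(x, ignore_nan=True)
print(m - 3 * s, m + 3 * s)  # roughly 2 and 8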
Example 23
    def Periodogram(self, madVar=True):
        """ This function computes the power spectrum from the timeseries ONLY.
        """

        dtav = np.mean(np.diff(self.time_fix))  # mean value of time differences (s)
        dtmed = np.median(np.diff(self.time_fix))  # median value of time differences (s)
        if dtmed == 0:  dtmed = dtav

        # compute periodogram from regular frequency values
        fmin = 0  # minimum frequency
        N = len(self.time_fix)  # n-points
        df = 1./(dtmed*N)  # bin width (1/Tobs) (in Hz)
        model = LombScargleFast().fit(self.time_fix, self.flux_fix, np.ones(N))
        power = model.score_frequency_grid(fmin, df, N // 2)  # signal-to-noise ratio, (1) eqn 9
        freqs = fmin + df * np.arange(N // 2)  # the periodogram was computed over these freqs (Hz)

        # the variance of the flux
        if madVar:  var = mad_std(self.flux_fix)**2
        else:       var = np.std(self.flux_fix)**2

        # convert to PSD, see (1) eqn 1, 8, 9 & (2)
        power /= np.sum(power)  # make the power sum to unity (dimensionless)
        power *= var  # Parseval's theorem. time-series units: ppm. variance units: ppm^2
        power /= df * 1e6  # convert from ppm^2 to ppm^2 muHz^-1

        if len(freqs) < len(power):  power = power[0:len(freqs)]
        if len(freqs) > len(power):  freqs = freqs[0:len(power)]

        self.freq = freqs * 1e6    # muHz
        self.power = power         # ppm^2 muHz^-1
        self.bin_width = df * 1e6  # muHz
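The Parseval-style normalization in the middle of this method can be checked in isolation; a minimal sketch with a plain FFT periodogram standing in for LombScargleFast (all values synthetic):

import numpy as np
from astropy.stats import mad_std

flux = np.random.default_rng(6).normal(size=1024)
power = np.abs(np.fft.rfft(flux))**2
var = mad_std(flux)**2             # robust variance of the time series
power = power / power.sum() * var  # now sum(power) equals the variance
print(np.isclose(power.sum(), var))  # True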
Example 24
File: test.py Project: SKIRT/PTS
    def calculate_statistics_no_clipping(self):

        """
        This function ...
        :return:
        """

        # Compress (remove masked values)
        flattened = np.ma.array(self.sources_with_noise.data, mask=self.rotation_mask.data).compressed()

        median = np.median(flattened)
        biweight_loc = biweight_location(flattened)

        biweight_midvar = biweight_midvariance(flattened)
        median_absolute_deviation = mad_std(flattened)

        #print("median", median)
        #print("biweigth_loc", biweight_loc)
        #print("biweight_midvar", biweight_midvar)
        #print("median_absolute_deviation", median_absolute_deviation)

        self.statistics.no_clipping = Map()
        self.statistics.no_clipping.median = median
        self.statistics.no_clipping.biweight_loc = biweight_loc
        self.statistics.no_clipping.biweight_midvar = biweight_midvar
        self.statistics.no_clipping.median_absolute_deviation = median_absolute_deviation
Example 25
def simple_para_clean(pmaps, ncomp, npara=4):
    # clean parameter maps based on their error values

    pmaps = pmaps.copy()

    # remove components whose vlsr error is more than std_thres sigma above the median
    std_thres = 2

    pmaps[pmaps == 0] = np.nan

    # loop through each component
    for i in range(0, ncomp):
        # get the STD and Medians of the vlsr errors
        std_vErr = mad_std(pmaps[(i + ncomp) * npara][np.isfinite(
            pmaps[(i + ncomp) * npara])])
        median_vErr = np.median(pmaps[(i + ncomp) * npara][np.isfinite(
            pmaps[(i + ncomp) * npara])])

        # remove outlier pixels
        mask = pmaps[(i + ncomp) * npara] > median_vErr + std_vErr * std_thres

        pmaps[i * npara:(i + 1) * npara, mask] = np.nan
        pmaps[(i + ncomp) * npara:(i + ncomp + 1) * npara, mask] = np.nan

    return pmaps
Example 26
def fit_2d(x, y, z, x0, y0, weights=None, order=4, niter=3):
    # Fit
    sx = (x - np.mean(x))/np.max(x)
    sy = (y - np.mean(y))/np.max(y)

    X = make_series(1.0, sx, sy, order=order)
    X = np.vstack(X).T
    Y = z

    idx = np.isfinite(Y)

    for i in range(niter):
        if weights is not None:
            C = sm.WLS(Y[idx], X[idx], weights=weights[idx]).fit()
        else:
            C = sm.RLM(Y[idx], X[idx]).fit()

        YY = np.sum(X*C.params, axis=1)
        idx = np.abs(Y-YY - np.median((Y-YY)[idx])) < 3.0*mad_std((Y-YY)[idx])

    # Predict
    sx = (x0 - np.mean(x))/np.max(x)
    sy = (y0 - np.mean(y))/np.max(y)

    X = make_series(1.0, sx, sy, order=order)
    X = np.vstack(X).T

    return np.sum(X*C.params, axis=1)
Example 27
    def CalcBkg(self):
        if not isinstance(self.img, np.ndarray):
            return
        from astropy.stats import mad_std
        bkg = np.median(self.img)
        sigma = mad_std(self.img)
        self.bkg = bkg
        self.bkg_sigma = sigma
Example 28
def findStars(image):
    # In order to use the MAD as a consistent estimator of the standard
    # deviation σ, one takes σ = k * MAD, where k is a constant scale factor
    # that depends on the distribution. For normally distributed data, k is
    # taken to be k ≈ 1.4826.
    bkg_sigma = 1.48 * mad(image)
    t = 5 * bkg_sigma
    daofind = DAOStarFinder(fwhm=3.0, threshold=t)
    stars = daofind(image)

    #stars['signal'] = stars['flux'] * t
    #
    data = image
    mask = make_source_mask(data, snr=2, npixels=5, dilate_size=11)
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
    madstd = mad_std(data)

    snrs = []
    for peak in stars['peak']:
        snrs.append(peak / madstd / 7.4)
    stars['snr'] = snrs
    print((mean, median, std, bkg_sigma, t, madstd))
    #
    #print stars
    return stars
Example 29
File: test.py Project: rag9704/PTS
    def calculate_statistics_no_clipping(self):
        """
        This function ...
        :return:
        """

        # Compress (remove masked values)
        flattened = np.ma.array(self.sources_with_noise.data,
                                mask=self.rotation_mask.data).compressed()

        median = np.median(flattened)
        biweight_loc = biweight_location(flattened)

        biweight_midvar = biweight_midvariance(flattened)
        median_absolute_deviation = mad_std(flattened)

        #print("median", median)
        #print("biweigth_loc", biweight_loc)
        #print("biweight_midvar", biweight_midvar)
        #print("median_absolute_deviation", median_absolute_deviation)

        self.statistics.no_clipping = Map()
        self.statistics.no_clipping.median = median
        self.statistics.no_clipping.biweight_loc = biweight_loc
        self.statistics.no_clipping.biweight_midvar = biweight_midvar
        self.statistics.no_clipping.median_absolute_deviation = median_absolute_deviation
    def median_fwhm(self, direction):
        """Returns the sigma-clipped median fitted FWHM over both X 
           and Y in pixels over all stars that fitted successfully, 
           along with median absolute deviation (MAD) standard deviation.
           
        Note that the deviation return is standard deviation based on
        the MAD, not the MAD itself. See astropy.stats.mad_std
        
        Direction must be one of 'both', 'x', or 'y'.
        """

        # Clip values that are more than this number of sigma from the
        # median.
        numsig = 3.0

        # Select only cases where the fitting appeared to work correctly.
        ok_x = self._fit_table['fwhm_x'][self._fit_table['fit_ok']]
        ok_y = self._fit_table['fwhm_y'][self._fit_table['fit_ok']]

        if 'both' in direction:
            ok_fwhm = np.concatenate((ok_x, ok_y))  # Note inputs as tuple
        elif 'x' in direction:
            ok_fwhm = ok_x
        elif 'y' in direction:
            ok_fwhm = ok_y

        clipped = sigma_clip(ok_fwhm, sigma=numsig, masked=False)
        num_used = len(clipped)
        self._logger.debug(
            f'Estimating median FWHM (direction={direction}) using {num_used} FWHM measurements ({len(ok_fwhm)} OK fits before clipping).'
        )

        median_fwhm = float(np.median(clipped))
        madstd_fwhm = float(mad_std(clipped))
        return (median_fwhm, madstd_fwhm, num_used)
Example 32
def _remove_bad_pixels(col_index, dtype):
    global original, original_shape, scaled, scaled_shape, output, output_shape, lambdas, img_center, psfs, psfs_shape, Npixproc, Npixtot
    original_np = _arraytonumpy(original, original_shape, dtype=dtype)
    tmpcube = copy(original_np[:, :, col_index])
    nan_mask_boxsize = 3
    x = np.arange(nl)
    for m in np.arange(0, ny):
        myvec = tmpcube[:, m]
        wherefinite = np.where(np.isfinite(myvec))
        if np.size(wherefinite[0]) < 10:
            continue
        smooth_vec = median_filter(myvec,
                                   footprint=np.ones(100),
                                   mode="constant",
                                   cval=0.0)
        myvec = myvec - smooth_vec
        wherefinite = np.where(np.isfinite(myvec))
        mad = mad_std(myvec[wherefinite])
        original_np[np.where(np.abs(myvec) > 7 * mad)[0], m,
                    col_index] = np.nan
        widen_nans = np.where(
            np.isnan(
                np.correlate(original_np[:, m, col_index],
                             np.ones(nan_mask_boxsize),
                             mode="same")))[0]
        original_np[widen_nans, m, col_index] = np.nan
Example 33
def compute_stats(x):
	""" Compute stats """
	
	## Compute stats
	npixels= np.size(x)
	pixel_min= np.min(x)
	pixel_max= np.max(x)
	mean= np.mean(x)
	stddev= np.std(x,ddof=1)
	median= np.median(x)
	mad= mad_std(x)
	skewness= stats.skew(x)
	kurtosis= stats.kurtosis(x)
	
	## Compute robust stats
	niter = 1
	sigmaclip = 3
	[mean_clipped, median_clipped, stddev_clipped] = sigma_clipped_stats(x, sigma=sigmaclip, maxiters=niter, std_ddof=1)

	print('*** IMG STATS ***')
	print('n=', npixels)
	print('min/max=', pixel_min, '/', pixel_max)
	print('mean=', mean)
	print('stddev=', stddev)
	print('median=', median)
	print('mad=', mad)
	print('skew=', skewness)
	print('kurtosis=', kurtosis)
	print('mean_clipped=', mean_clipped)
	print('median_clipped=', median_clipped)
	print('stddev_clipped=', stddev_clipped)
	print('*****************')
    def init_parameters(self):
        name = self.name
        wns = log10(mad_std(diff(self.fluxes)) / sqrt(2))
        pgp = [
            LParameter(f'{name}_ln_out', f'{name} ln output scale', '',
                       NP(-6, 1.5), bounds=(-inf, inf)),
            LParameter(f'{name}_ln_in', f'{name} ln input scale', '',
                       UP(-8, 8), bounds=(-inf, inf)),
            LParameter(f'{name}_log10_wn', f'{name} log10 white noise sigma', '',
                       NP(wns, 0.025), bounds=(-inf, inf))
        ]
        self.lpf.ps.thaw()
        self.lpf.ps.add_global_block(self.name, pgp)
        self.lpf.ps.freeze()
        self.pv_slice = self.lpf.ps.blocks[-1].slice
        self.pv_start = self.lpf.ps.blocks[-1].start
        setattr(self.lpf, f"_sl_{name}", self.pv_slice)
        setattr(self.lpf, f"_start_{name}", self.pv_start)
Example 35
    def fit_flux(self, **kwargs):
        """ """
        from .fitter import DiffImgFitter
        from astropy.stats import mad_std

        # Load the fitter
        self.fitter = DiffImgFitter(self.diffimg, self.psfimg, 0, shape=self.psfshape)

        # Estimate the robust noise level and set the fit properties
        robust_nmad = mad_std(self.fitter.data[~np.isnan(self.fitter.data)])
        fit_prop = {
            "ampl_guess": np.nanmax(self.fitter.data)
            * (np.sqrt(2 * np.pi * 2 ** 2)),  # 2 sigma guess
            "sigma_guess": robust_nmad,
        }
        fit_prop["ampl_boundaires"] = [
            -2 * np.nanmin(self.fitter.data) * (np.sqrt(2 * np.pi * 2 ** 2)),
            5 * np.nanmax(self.fitter.data) * (np.sqrt(2 * np.pi * 2 ** 2)),
        ]
        fit_prop["sigma_boundaries"] = [
            robust_nmad / 10.0,
            np.nanstd(self.fitter.data) * 2,
        ]

        # Return fitter output
        return self.fitter.fit(**{**fit_prop,**kwargs})
Example 36
    def _stats_data(self, stats, mask, scipy, astropy, decimals_mode):
        data = self.data

        # The original data size, for computation of valid elements and how
        # many are masked/invalid.
        size_initial = data.size

        # Delete masked values; this directly converts the result to a 1D
        # array (if the mask is not a suitable boolean mask, ravel it).
        data = data[~mask]
        size_masked = data.size

        # Delete invalid (NaN, Inf) values. This should ensure that the result
        # is always a 1D array
        data = data[np.isfinite(data)]
        size_valid = data.size
        stats['elements'] = [size_valid]

        stats['min'] = [np.amin(data)]
        stats['max'] = [np.amax(data)]
        stats['mean'] = [np.mean(data)]
        stats['median'] = [np.median(data)]
        # Use custom mode defined in this package because scipy.stats.mode is
        # very, very slow and by default tries to calculate the mode along
        # axis=0 and not for the whole array.
        # Take the first element since the second is the number of occurrences.
        stats['mode'] = [mode(data, decimals=decimals_mode)[0]]

        if astropy:
            stats['biweight_location'] = [biweight_location(data)]

        stats['std'] = [np.std(data)]

        if astropy:
            stats['mad'] = [mad_std(data)]
            stats['biweight_midvariance'] = [biweight_midvariance(data)]

        stats['var'] = [np.var(data)]

        if scipy:  # pragma: no cover
            if not OPT_DEPS['SCIPY']:
                log.info('SciPy is not installed.')
            else:
                # Passing axis=None should not be important since we already
                # boolean-indexed the array and it's 1D. But it's important
                # to remember that their default is axis=0 and not axis=None!
                stats['skew'] = [skew(data, axis=None)]
                stats['kurtosis'] = [kurtosis(data, axis=None)]

        stats['masked'] = [size_initial - size_masked]
        stats['invalid'] = [size_masked - size_valid]

        return data
Example 37
def find_sources(imageFile, data, seeing_in_pix, threshold=5.):
    # estimate the 1-sigma noise level using the median absolute deviation of the image
    print("[*] Estimating 1-sigma noise level.")
    # generate a mask for 0 pixel counts. These are chip gaps or skycell edges generated by
    # np.nan_to_num and will affect the noise level estimate.
    mask = np.where(data != 0)
    bkg_sigma = mad_std(data[mask])
    #print(np.median(data), mad(data), bkg_sigma)
    # use daofind to detect sources
    print("[*] Detecting %d-sigma sources in %s" % (threshold, imageFile))
    sources = daofind(data, fwhm=seeing_in_pix, threshold=threshold*bkg_sigma)
    print("[*] Source detection successful.")
    print("\t[i] %d sources detected: " % (len(sources["xcentroid"])))
    print()
    print(sources)
    return sources, bkg_sigma
Example 38
    def mad_std(self):
        """
        A robust standard deviation using the `median absolute deviation
        (MAD)
        <http://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
        The MAD is defined as ``median(abs(a - median(a)))``.

        The standard deviation estimator is given by:

        .. math::

            \\sigma \\approx \\frac{\\textrm{MAD}}{\\Phi^{-1}(3/4)} \\approx 1.4826 \\ \\textrm{MAD}

        where :math:`\\Phi^{-1}(P)` is the normal inverse cumulative
        distribution function evaluated at probability :math:`P = 3/4`.
        """

        return mad_std(self.goodvals)
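The relation in the docstring is easy to verify numerically; mad_std should recover the true sigma of normally distributed data, matching 1.4826 times the plain MAD:

import numpy as np
from astropy.stats import mad_std, median_absolute_deviation

x = np.random.default_rng(7).normal(scale=2.0, size=100000)
print(median_absolute_deviation(x) * 1.4826)  # ~2.0
print(mad_std(x))                             # same value, ~2.0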
def photometry(data, mywcs, regs, beam):
    results = {}
    for ii,reg in enumerate(regs):
        if 'text' not in reg.meta:
            name = str(ii)
        else:
            name = reg.meta['text'].strip("{}")

        # all regions are points: convert them to 0.5" circles
        phot_reg = regions.CircleSkyRegion(center=reg.center, radius=0.5*u.arcsec)
        pixreg = phot_reg.to_pixel(mywcs)

        bgreg = regions.CircleSkyRegion(center=reg.center, radius=1.5*u.arcsec).to_pixel(mywcs)

        log.info(name)

        mask = pixreg.to_mask()
        cutout = mask.cutout(data) * mask.data

        # how do I make an annulus?
        bgmask = bgreg.to_mask()
        
        # manualannulus
        diff = bgmask.shape[0]-mask.shape[0]
        bgm = bgmask.data.astype('bool')
        bgm[int(diff/2):-int(diff/2), int(diff/2):-int(diff/2)] ^= mask.data.astype('bool')
        assert bgm.sum() == bgmask.data.sum() - mask.data.sum()

        bgcutout = bgmask.cutout(data) * bgm

        results[name] = {'peak': cutout.max(),
                         'sum': cutout.sum(),
                         'bgrms': bgcutout.std(),
                         'bgmad': mad_std(bgcutout),
                         'npix': mask.data.sum(),
                         'beam_area': beam.sr,
                         'RA': reg.center.ra[0],
                         'Dec': reg.center.dec[0],
                        }

    return results
Example 40
def find_sources(image):
    """Return sources (x, y) sorted by brightness.
    """
    from scipy import ndimage
    from astropy.stats import mad_std

    img1 = image.copy().astype('float32')
    m, s = np.median(image), mad_std(image)
    src_mask = image > m + 3.0 * s
    # set the background to the min value of the sources
    img1[~src_mask] = img1[src_mask].min()
    # this rescales (min,max) to (0,1)
    img1 = (img1.min() - img1) / (img1.min() - img1.max())
    img1[~src_mask] = 0.

    def obj_params_with_offset(img, labels, aslice, label_idx):
        y_offset = aslice[0].start
        x_offset = aslice[1].start
        thumb = img[aslice]
        lb = labels[aslice]
        yc, xc = ndimage.center_of_mass(thumb, labels=lb, index=label_idx)
        br = thumb[lb == label_idx].sum()  # the intensity of the source
        return [br, xc + x_offset, yc + y_offset]

    srcs_labels, num_srcs = ndimage.label(img1)

    if num_srcs < 10:
        print("WARNING: Only %d sources found." % (num_srcs))

    # Eliminate here all 1 pixel sources
    all_objects = [[ind + 1, aslice] for ind, aslice
                   in enumerate(ndimage.find_objects(srcs_labels))
                   if srcs_labels[aslice].shape != (1, 1)]
    lum = np.array([obj_params_with_offset(img1, srcs_labels, aslice, lab_idx)
                    for lab_idx, aslice in all_objects])

    lum = lum[lum[:, 0].argsort()[::-1]]  # sort by brightness descending order

    return lum[:, 1:]
Example 41
def PCA_light_curve(pr, transit_parameters, buffer_time=5*u.min,
                    outlier_mad_std_factor=3.0, plots=False,
                    validation_duration_fraction=1/6,
                    flux_threshold=0.89, validation_time=-0.65,
                    plot_validation=False, outlier_rejection=True):
    """
    Parameters
    ----------
    pr : `~toolkit.PhotometryResults`
    transit_parameters : `~batman.TransitParams`
    buffer_time : `~astropy.units.Quantity`
    outlier_mad_std_factor : float
    plots : bool
    validation_duration_fraction :  float

    Returns
    -------
    best_lc : `~numpy.ndarray`
    """
    expected_mid_transit_jd = ((np.max(np.abs(pr.times - transit_parameters.t0) //
                                       transit_parameters.per) ) * # need to add +1 here for 20170502, don't know why TMP
                               transit_parameters.per + transit_parameters.t0)
    mid_transit_time = Time(expected_mid_transit_jd, format='jd')

    transit_duration = transit_parameters.duration + buffer_time

    final_lc_mad = np.ones(len(pr.aperture_radii))

    final_lc = None
    figures = []

    for aperture_index in range(len(pr.aperture_radii)):
        target_fluxes = pr.fluxes[:, 0, aperture_index]
        target_errors = pr.errors[:, 0, aperture_index]

        if not outlier_rejection:
            inliers = np.ones_like(pr.fluxes[:, 0, aperture_index]).astype(bool)

        else:
            inliers = np.ones_like(pr.fluxes[:, 0, aperture_index]).astype(bool)

            for i in range(pr.fluxes.shape[1]):
                flux_i = pr.fluxes[:, i, aperture_index]

                linear_flux_trend = np.polyval(np.polyfit(pr.times - pr.times.mean(),
                                                          flux_i, 1),
                                               pr.times - pr.times.mean())
                new_inliers = (np.abs(flux_i - linear_flux_trend) < outlier_mad_std_factor *
                               mad_std(flux_i))
                inliers &= new_inliers

        out_of_transit = ((Time(pr.times, format='jd') > mid_transit_time + transit_duration/2) |
                          (Time(pr.times, format='jd') < mid_transit_time - transit_duration/2))

        validation_duration = validation_duration_fraction * transit_duration

        validation_mask = ((Time(pr.times, format='jd') < mid_transit_time +
                            validation_time * transit_duration + validation_duration / 2) &
                           (Time(pr.times, format='jd') > mid_transit_time +
                            validation_time * transit_duration - validation_duration / 2))

        oot = out_of_transit & inliers
        oot_no_validation = (out_of_transit & inliers & np.logical_not(validation_mask))

        if plot_validation:
            plt.figure()
            plt.plot(pr.times[~oot], target_fluxes[~oot], '.', label='in-t')
            plt.plot(pr.times[oot], target_fluxes[oot], '.', label='oot')
            plt.plot(pr.times[validation_mask], target_fluxes[validation_mask], '.',
                     label='validation')
            plt.axvline(mid_transit_time.jd, ls='--', color='r', label='midtrans')
            plt.legend()
            plt.title(np.count_nonzero(validation_mask))
            plt.xlabel('JD')
            plt.ylabel('Flux')
            plt.show()

        ones = np.ones((len(pr.times), 1))
        regressors = np.hstack([pr.fluxes[:, 1:, aperture_index],
                                pr.xcentroids[:, 0, np.newaxis],
                                pr.ycentroids[:, 0, np.newaxis],
                                pr.airmass[:, np.newaxis],
                                pr.airpressure[:, np.newaxis],
                                pr.humidity[:, np.newaxis],
                                pr.background_median[:, np.newaxis]
                                ])

        n_components = np.arange(2, regressors.shape[1])


        def train_pca_linreg_model(out_of_transit_mask, oot_no_validation_mask, n_comp):

            # OOT chunk first:
            pca = PCA(n_components=n_comp)
            reduced_regressors = pca.fit_transform(regressors[out_of_transit_mask],
                                                   target_fluxes[out_of_transit_mask])

            prepended_regressors_oot = np.hstack([ones[out_of_transit_mask],
                                                  reduced_regressors])
            c_oot = regression_coeffs(prepended_regressors_oot,
                                      target_fluxes[out_of_transit_mask],
                                      target_errors[out_of_transit_mask])

            lc_training = (target_fluxes[out_of_transit_mask] -
                           regression_model(c_oot, prepended_regressors_oot))

            median_oot = np.median(target_fluxes[out_of_transit_mask])
            std_lc_training = np.std((lc_training + median_oot) / median_oot)

            # Now on validation chunk:
            reduced_regressors_no_validation = pca.fit_transform(regressors[oot_no_validation_mask],
                                                                 target_fluxes[oot_no_validation_mask])

            prepended_regressors_no_validation = np.hstack([ones[oot_no_validation_mask],
                                                            reduced_regressors_no_validation])
            c_no_validation = regression_coeffs(prepended_regressors_no_validation,
                                                target_fluxes[oot_no_validation_mask],
                                                target_errors[oot_no_validation_mask])

            lc_validation = (target_fluxes[out_of_transit_mask] -
                             regression_model(c_no_validation, prepended_regressors_oot))

            std_lc_validation = np.std((lc_validation + median_oot) / median_oot)

            return lc_training, lc_validation, std_lc_training, std_lc_validation


        stds_validation = np.zeros_like(n_components, dtype=float)
        stds_training = np.zeros_like(n_components, dtype=float)

        for i, n_comp in enumerate(n_components):

            results = train_pca_linreg_model(oot, oot_no_validation, n_comp)
            lc_training, lc_validation, std_lc_training, std_lc_validation = results
            stds_validation[i] = std_lc_validation
            stds_training[i] = std_lc_training

        best_n_components = n_components[np.argmin(stds_validation)]
        if plots:
            fig = plt.figure()
            plt.plot(n_components, stds_validation, label='validation')
            plt.plot(n_components, stds_training, label='training')
            plt.xlabel('Components')
            plt.ylabel('std')
            plt.axvline(best_n_components, color='r', ls='--')
            plt.title("Aperture: {0} (index: {1})"
                      .format(pr.aperture_radii[aperture_index],
                              aperture_index))
            plt.legend()
            figures.append(fig)

        # Now apply PCA to generate light curve with best number of components
        pca = PCA(n_components=best_n_components)
        reduced_regressors = pca.fit_transform(regressors[oot], target_fluxes[oot])

        all_regressors = pca.transform(regressors)
        prepended_all_regressors = np.hstack([ones, all_regressors])

        prepended_regressors_oot = np.hstack([ones[oot], reduced_regressors])
        c_oot = regression_coeffs(prepended_regressors_oot,
                                  target_fluxes[oot],
                                  target_errors[oot])

        best_lc = ((target_fluxes - regression_model(c_oot, prepended_all_regressors)) /
                   np.median(target_fluxes)) + 1

        final_lc_mad[aperture_index] = mad_std(best_lc[out_of_transit])

        if final_lc_mad[aperture_index] == np.min(final_lc_mad):
            final_lc = best_lc.copy()

    if plots:
        # Close all validation plots except the best aperture's
        for i, fig in enumerate(figures):
            if i != np.argmin(final_lc_mad):
                plt.close(fig)

        plt.figure()
        plt.plot(pr.aperture_radii, final_lc_mad)
        plt.axvline(pr.aperture_radii[np.argmin(final_lc_mad)], ls='--', color='r')
        plt.xlabel('Aperture radii')
        plt.ylabel('mad(out-of-transit light curve)')

        plt.figure()
        plt.plot(pr.times, final_lc, 'k.')
        plt.xlabel('Time [JD]')
        plt.ylabel('Flux')
        plt.show()
    return final_lc
Example 42
def check_matches(files, cols,
                  neighbor,
                  upperlimits=[4, 10.0],
                  printlist=False,
                  debug=False,
                   **keyword_parameter):

    """

    Performs self-neighbour matching of a file and returns diagnostic plots.

    Parameters
    ----------

    files: <type 'str'>
                Name of file

    columns : <type 'ndarray'> or <type 'list'>
                array of column names needed. Must be
                <type 'str'>.

    neighbor: <type 'int'>
                which nth-neighbor match needed.

    Returns
    -------

    matplotlib image

    Examples
    --------

    f1 = "output_DR12_1p44UKIDSSlas_4p0WISE_starL_GMM5QSOs.fits"
    final_array = check_matches(f1,['ra','dec','psfMag_i'],2)
    final_array = check_matches(f1,['ra','dec','psfMag_i'],2, save = '/Desktop/important_plot.png')

    """
    from astropy.table import Table

    figsize=(8,6)

    median_and_mean = [[],[]]

    print('files:', files)
    match_object = files
    columns_object = cols

    # read in the data file
    data = Table.read(files)
    data.info('stats')

    #to_match_RA = fitsio.read(match_object, columns=columns_object[0])
    #to_match_DEC = fitsio.read(match_object, columns=columns_object[1])
    #psfmag=fitsio.read(match_object, columns=columns_object[2])

    #to_match_RA = Table.read(match_object, columns=columns_object[0])
    #to_match_DEC = Table.read(match_object, columns=columns_object[1])
    #psfmag= Table.read(match_object, columns=columns_object[2])

    # help(data)
    # help(to_match_RA)

    print(columns_object[0])
    to_match_RA = data[columns_object[0]]
    to_match_DEC = data[columns_object[1]]
    psfmag = data[columns_object[2]]  # the third requested column, e.g. 'psfMag_i'
    # help(to_match_RA)
    print(to_match_RA.unit)
    print(to_match_RA.shape)
    print(len(to_match_RA), np.min(to_match_RA), np.max(to_match_RA))

    vot = True
    # check units and convert to u.deg if needed
    if to_match_RA.unit != 'deg':
        to_match_RA = to_match_RA * u.deg
    if to_match_DEC.unit != 'deg':
        to_match_DEC = to_match_DEC * u.deg

    skycoord_object = SkyCoord(to_match_RA, to_match_DEC,
                                   frame='icrs')

    # matches to self
    idx, d2d, d3d = match_coordinates_sky(skycoord_object, skycoord_object,
                                          nthneighbor=neighbor)
    idx2 = np.asarray([i for i in range(len(idx))])

    #set limits
    separations = np.asarray(d2d)*3600.0

    itest =  (separations < upperlimits[0])
    result = data[itest]
    result_separations = separations[itest]
    print(upperlimits[0])
    print(result_separations)
    if printlist:
        for irow, row in enumerate(result):
            print(irow,
                  row['ra'],
                  row['dec'],
                  row['dist'] * 3600.0,
                  result_separations[irow],
                  row['phot_g_mean_mag'])


    upperlimit = upperlimits[0]
    upperlimit2 = upperlimits[1]
    separations_reduced = separations[(separations<=upperlimit)]
    separations_orig = separations[(separations<=upperlimit2)]
    psfmag_reduced=np.asarray(psfmag)[(separations<=upperlimit)]

    # separations_reduced = separations[(np.asarray(psfmag)<18.0)*(separations<=upperlimit2)]
    # separations_orig = separations[(separations<=upperlimit2)]
    # psfmag_reduced=np.asarray(psfmag)[(np.asarray(psfmag)<18.0)]

    masked_list_ra = np.asarray(skycoord_object.ra)[(idx)]
    masked_list_dec = np.asarray(skycoord_object.dec)[(idx)]
    masked_list_ra_cat = np.asarray(skycoord_object.ra)
    masked_list_dec_cat = np.asarray(skycoord_object.dec)
    # masked = skycoord_object[idx]
    # dra, ddec = skycoord_object.spherical_offsets_to(masked)
    # sky = SkyCoord(masked_list_ra*u.degree, masked_list_dec*u.degree, frame='icrs')
    # dra, ddec = skycoord_object.spherical_offsets_to(sky)
    # dra=float(dra.to(u.arcsec))
    # ddec=float(dra.to(u.arcsec))

    difference_ra = ((((masked_list_ra_cat-masked_list_ra)*np.cos(np.radians(masked_list_dec_cat))))*3600.0)
    difference_dec = (((masked_list_dec_cat-masked_list_dec))*3600.0)

    #final array
    idx_pairs = idx[(separations<=upperlimits[0])]
    idx_pairs_second = idx2[(separations<=upperlimits[0])]

    masked_list_ra_pairs1 = np.asarray(skycoord_object.ra)[(idx_pairs)]
    masked_list_dec_pairs1 = np.asarray(skycoord_object.dec)[(idx_pairs)]

    masked_list_ra_pairs2 = np.asarray(skycoord_object.ra)[(idx_pairs_second)]
    masked_list_dec_pairs2 = np.asarray(skycoord_object.dec)[(idx_pairs_second)]

    median_and_mean = [list(difference_ra),list(difference_dec)]
    median_and_mean = np.asarray(median_and_mean)
    mad_standard = mad_std(median_and_mean)
    mad_median = mad_med(median_and_mean)
    length = len(masked_list_ra)
    length30 = len(separations_reduced)
    med = np.median(separations)
    med_red = np.median(separations_reduced)

    fig = plt.figure(1, figsize=(8,6))
    print('files:', files, len(files))
    print("file: %s" % files)
    plt.suptitle("file: %s"% files, size=10)
    #pylab.title("file: %s"% files,size=14, fontsize='medium')

    ax1=fig.add_subplot(2,2,1)

    print(len(separations_orig))
    print(upperlimit2)
    n, b, patches = ax1.hist(separations_orig,
                             bins=int(upperlimit2/0.5),
                             range=[0.0, upperlimit2],
                             color='green', alpha=0.3)
    bin_min = np.where(n == n.min())
    n1, b1, pathes1 = ax1.hist(separations_reduced,
                               bins=int(upperlimit2/0.5),
                               range=[0.0, upperlimit2],
                               color='blue')
    ax1.set_xlim(0.0, upperlimit2)

    ax1.locator_params(axis='x',nbins=4)

    s0 = 'Matched to self'
    ax1.annotate(s0,(0.28,0.95) , xycoords = 'axes fraction',size=8)
    s04 = '# of Objects = %i'%length
    ax1.annotate(s04,(0.28,0.90) , xycoords = 'axes fraction',size=8)
    s01 = '(All objects) Median = %.2f' % med
    ax1.annotate(s01,(0.28,0.85) , xycoords = 'axes fraction',size=8)
    s03 = '# of objects <= %i arcsecs = %i' % (upperlimit, length30)
    ax1.annotate(s03,(0.28,0.80) , xycoords = 'axes fraction',size=8)
    s02 = '(Objects <= %i arcsec) Median = %.2f' % (upperlimit, med_red)
    ax1.annotate(s02,(0.28,0.75) , xycoords = 'axes fraction',size=8)

    ax1.set_xlabel('Separation (arcseconds)')
    ax1.set_ylabel('Frequency')

    ax2 = fig.add_subplot(2,2,2)
    markersize = 0.5
    markersize = 1.0
    alpha = 1.0
    ax2.plot(difference_ra, difference_dec,
             'oc',
             markersize=markersize, markeredgewidth=0.0,
             alpha=alpha)
    xrange = [-1.0*upperlimits[1], 1.0*upperlimits[1]]
    yrange = [-1.0*upperlimits[1], 1.0*upperlimits[1]]
    print(xrange + yrange)
    ranges = xrange + yrange
    # ax2.axis('equal')
    ax2.set_aspect('equal')
    ax2.axis(ranges)
    ax2.locator_params(axis='x',nbins=4)
    ax2.set_xlabel('Delta RA (")')
    ax2.set_ylabel('Delta Dec (")')
    # s11 = 'Zoomed-in to 30 arcsecs'
    # ax2.annotate(s11,(0.45,0.95) , xycoords = 'axes fraction',size=8)
    s1 = '# of Objects = %i' % length
    ax2.annotate(s1,(0.45,0.90) , xycoords = 'axes fraction',size=8)
    s7 = 'MAD = %.2f' % mad_median
    ax2.annotate(s7,(0.45,0.85) , xycoords = 'axes fraction',size=8)
    s3 = 'MAD_std = %.2f' % mad_standard
    ax2.annotate(s3,(0.45,0.80) , xycoords = 'axes fraction',size=8)

    ax3 = fig.add_subplot(2,2,3)
    bin_size1 = 0.25; min_edge1 = 5; max_edge1 = 22
    N1 = int((max_edge1 - min_edge1) / bin_size1); Nplus11 = N1 + 1
    bin_list1 = np.linspace(min_edge1, max_edge1, Nplus11)
    ax3.hist(psfmag_reduced,bins=bin_list1,color='blue')
    xlabel = cols[2]
    ax3.set_xlabel(xlabel)
    ax3.set_ylabel('Frequency')
    ax3.locator_params(axis='x',nbins=4)
    # ax3.plot(to_match_RA,to_match_DEC,'og',markersize=0.5,markeredgewidth=0.0,alpha=0.3)
    # ax3.locator_params(axis='x',nbins=4)
    # ax3.set_xlabel('RA')
    # ax3.set_ylabel('DEC')

    ax4 = fig.add_subplot(2,2,4)
    bin_size = 0.25; min_edge = 5; max_edge = 22
    N = int((max_edge - min_edge) / bin_size); Nplus1 = N + 1
    bin_list = np.linspace(min_edge, max_edge, Nplus1)
    ax4.hist(psfmag_reduced,bins=bin_list,color='blue')
    ax4.hist(psfmag,bins=bin_list,color='green',alpha=0.3)
    xlabel = cols[2]
    ax4.set_xlabel(xlabel)
    ax4.set_ylabel('Frequency')
    ax4.locator_params(axis='x',nbins=4)

    fig.tight_layout()
    fig.subplots_adjust(top=0.88)

    plotid()

    if ('save' in keyword_parameter):
        path_to_save = str(keyword_parameter['save'])
        plt.savefig(path_to_save,dpi=150)
    else:
        plt.show()

    return (masked_list_ra_pairs1,masked_list_dec_pairs1,masked_list_ra_pairs2,masked_list_dec_pairs2)
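
For reference (not part of the scraped example), the self-match that drives the function above can be exercised on its own. A minimal sketch with synthetic coordinates; nthneighbor=2 returns the nearest *other* source, since neighbour 1 of a catalogue matched against itself is always the source itself:

import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord, match_coordinates_sky

# synthetic field: 100 sources scattered over ~0.1 deg
rng = np.random.default_rng(42)
coords = SkyCoord(ra=(150.0 + 0.1 * rng.random(100)) * u.deg,
                  dec=(2.0 + 0.1 * rng.random(100)) * u.deg,
                  frame='icrs')

# nthneighbor=2 skips the trivial zero-separation self match
idx, d2d, d3d = match_coordinates_sky(coords, coords, nthneighbor=2)
print('median nearest-neighbour separation: %.2f arcsec' %
      np.median(d2d.arcsec))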
Esempio n. 43
0
    def calc_background_rms(self, data, axis=None):
        if self.sigma_clip is not None:
            data = self.sigma_clip(data, axis=axis)

        return mad_std(data, axis=axis)
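
A quick illustration (synthetic data, not part of the class above) of why this background estimator prefers mad_std over a plain standard deviation: mad_std stays near the true scatter when a small fraction of the values is wildly discrepant.

import numpy as np
from astropy.stats import mad_std

rng = np.random.default_rng(0)
data = rng.normal(0.0, 1.0, 10000)   # true sigma = 1
data[:100] = 100.0                   # 1% gross outliers

print('std:     %.2f' % np.std(data))    # inflated by the outliers
print('mad_std: %.2f' % mad_std(data))   # stays close to 1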
Esempio n. 44
0
def xmatch_checkplot(ra1, dec1, ra2, dec2,
                     figsize = (6.0, 6.0),
                     width=10.0,
                     gtype="all", add_plotid=True, prefix=None,
                     saveplot=True,
                     plotfile="", plotfile_prefix=None,
                     title="",
                     suptitle=""):
    """ Makes checkplot for catalogue xmatch results

    Forked from Sophie Reed's version on 20160319

    uses hist2d; a point based option would be useful

    Plot can either be square, the square inscribes the circle.
    Or all which has all the points in the matching circle.
    Square make the histograms more comparable.

    Compares RA_main and DEC_main columns with RA and Dec columns in the
    format output by the matching codes. Eg. RA_ + survey.

    Width needs to be in arcsecs
    """
    import math
    import time
    import inspect

    import numpy as np

    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    from matplotlib.colors import LogNorm

    # import stats
    # import plotid

    now = time.localtime(time.time())
    datestamp = time.strftime("%Y%m%d", now)
    function_name = inspect.stack()[0][3]

    lineno = str(inspect.stack()[0][2])
    print(mk_timestamp(), function_name, lineno + ':')
    print(function_name + '.saveplot:', saveplot)
    print(function_name + '.plotfile:', plotfile)
    print(function_name + '.prefix:  ', plotfile_prefix)

    ndata = len(ra1)


    # this could probably be simplified and speeded up
    n = 0
    xs = []
    ys = []
    while n < len(ra1):
        x = (ra1[n] - ra2[n]) * \
             math.cos((dec1[n] + dec2[n]) * math.pi / 360.0) * 3600.0
        y = (dec1[n] - dec2[n]) * 3600.0

        if not np.isnan(x) and not np.isnan(y):
            xs.append(x)
            ys.append(y)
        n += 1

    n = 0
    xs_s = []
    ys_s = []

    if gtype == "square":
        w = width / math.sqrt(2.0)
        while n < len(xs):
            x = xs[n]
            y = ys[n]
            if x <= w and x >= -w and y <= w and y >= -w:
                xs_s.append(xs[n])
                ys_s.append(ys[n])
            n += 1

        xs = xs_s
        ys = ys_s

    if len(xs) == 0:
        print("No matches")
        return np.nan, np.nan

    xs1 = list(xs)
    ys1 = list(ys)

    RA_med = np.median(xs1)
    DEC_med = np.median(ys1)
    RA_mad_std = mad_std(xs1)
    DEC_mad_std = mad_std(ys1)

    print("Number of points", len(xs))
    print("RA median offset", RA_med, "Dec median offset", DEC_med)
    print("RA Sigma(MAD)", RA_mad_std, "Dec Sigma(MAD)", DEC_mad_std)
    print("RA median error", RA_mad_std / math.sqrt(len(xs)),
          "Dec median error", DEC_mad_std / math.sqrt(len(ys)))
    print("dRA range:", np.min(xs1), np.max(xs1))
    print("dDec range:", np.min(ys1), np.max(ys1))
    print()

    xs = np.asarray(xs)
    ys = np.asarray(ys)
    xlimits = np.asarray([-1.0*width, width])
    ylimits = np.asarray([-1.0*width, width])
    limits = np.asarray([xlimits, ylimits])
    print(xlimits[0], xlimits[1])
    print(xs.dtype)
    print(xs.shape)
    print(xlimits.dtype)
    print(xlimits.shape)
    # itest = (xs > xlimits[0] & xs < xlimits[1])
    # xs = xs[itest]
    # itest = (ys > ylimits[0] & ys < ylimits[1])
    # ys = ys[itest]

    print('limits:', limits)
    gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1], height_ratios=[1, 2])
    fig = plt.figure(figsize=figsize)
    ax1 = plt.subplot(gs[0])
    ax1.hist(xs, bins=100, color="r", range=xlimits)
    ax1.set_xlim(xlimits)
    ax1.axes.get_xaxis().set_visible(False)
    ax1.set_ylabel("Number")

    ax2 = plt.subplot(gs[2])
    # ax2.plot(xs, ys, "k+")
    if len(xs) > 100:
        plt.hist2d(xs, ys, bins=100,
                   cmap="binary",
                   norm=LogNorm(),
                   range=limits)
    else:
        plt.plot(xs, ys, "k.", ms=2)

    ax2.set_ylim(-1*width, width)
    ax2.set_xlim(-1*width, width)
    ax2.set_xlabel('Delta RA /"')
    ax2.set_ylabel('Delta Dec /"')
    labels1 = ax2.get_xticks()
    ax2.set_xticklabels(labels1, rotation=270)

    if suptitle is None:
        fig.suptitle("Errors in matching: " + str(ndata), fontsize='small')
    else:
        fig.suptitle(suptitle + ': ' + str(ndata), fontsize='small')

    ax3 = plt.subplot(gs[3])
    print('limits:', limits)
    ax3.hist(ys, bins=100, orientation="horizontal", color="r",
        range=ylimits)

    ax3.set_ylim(ylimits)
    ax3.set_xlabel("Number")
    ax3.axes.get_yaxis().set_visible(False)
    labels2 = ax3.get_xticks()
    ax3.set_xticklabels(labels2, rotation=270)

    ax4 = plt.subplot(gs[1])
    ax4.annotate("Number of points: " +
                 str(len(xs)), xy=(0.01, 0.1), size="small")
    ax4.annotate("RA offset: {0:.4f}".format(RA_med) +
                 '"', xy=(0.01, 0.90), size="small")
    ax4.annotate("DEC offset: {0:.4f}".format(DEC_med) +
                 '"', xy=(0.01, 0.8), size="small")
    ax4.annotate("RA sigma MAD: {0:.4f}".format(RA_mad_std) +
                 '"', xy=(0.01, 0.7), size="small")
    ax4.annotate("DEC sigma MAD: {0:.4f}".format(DEC_mad_std) +
                 '"', xy=(0.01, 0.6), size="small")
    ax4.annotate("RA median error: {0:.4f}".
                 format(RA_mad_std / math.sqrt(len(xs))) + '"',
                 xy=(0.01, 0.5), size="small")
    ax4.annotate("DEC median error: {0:.4f}".
                 format(DEC_mad_std / math.sqrt(len(ys))) + '"',
                 xy=(0.01, 0.4), size="small")
    ax4.annotate("RA sigma MAD: {0:.4f}".format(RA_mad_std) +
                 '"', xy=(0.01, 0.3), size="small")
    ax4.annotate("DEC sigma MAD: {0:.4f}".format(DEC_mad_std) +
                 '"', xy=(0.01, 0.2), size="small")

    ax4.axes.get_xaxis().set_visible(False)
    ax4.axes.get_yaxis().set_visible(False)

    if saveplot:
        lineno = str(inspect.stack()[0][2])
        print(mk_timestamp(), function_name, lineno)
        print('plotfile:', plotfile)
        print('plotfile_prefix:', plotfile_prefix)
        if add_plotid:
            # make room for the plotid on right edge
            fig.subplots_adjust(right=0.95)
            plotid()

        if not plotfile:
            if plotfile_prefix is not None:
                plotfile = plotfile_prefix + '_match_' + datestamp + '.png'
            else:
                plotfile = 'match_' + datestamp + '.png'

        print('Saving: ', plotfile)
        plt.savefig(plotfile)

    plt.show()

    return RA_med, DEC_med
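
A minimal usage sketch for the function above, with synthetic pre-matched coordinate pairs (in a real run ra2/dec2 would be the matched catalogue rows, and the mk_timestamp/plotid helpers it calls must be importable):

import numpy as np

rng = np.random.default_rng(1)
ra1 = 150.0 + 0.05 * rng.random(500)
dec1 = 2.0 + 0.05 * rng.random(500)
# second list: the same sources with ~0.2 arcsec Gaussian scatter
ra2 = ra1 + rng.normal(0.0, 0.2 / 3600.0, 500)
dec2 = dec1 + rng.normal(0.0, 0.2 / 3600.0, 500)

ra_med, dec_med = xmatch_checkplot(ra1, dec1, ra2, dec2,
                                   width=2.0,
                                   saveplot=False,
                                   suptitle='synthetic test')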
Esempio n. 45
0
def xmatch_cat(table1=None, table2=None,
               radec1=None, radec2=None,
               nthneighbor=None,
               multimatch=False,
               seplimit=10.0,
               selfmatch=False,
               colnames_radec1=['ra', 'dec'],
               colnames_radec2=['ra', 'dec'],
               units_radec1=['degree', 'degree'],
               units_radec2=['degree', 'degree'],
               stats=False,
               debug=False,
               verbose=False,
               method=False):
    """RA, Dec nearest xmatch for two lists; returns pointers

    nearest match

    input can be an astropy table or zipped radec as a list

    e.g.

    c = zip([1], [1])
    radec1 = zip(ra1, dec1)


    radec1 = np.column_stack((ra1, dec1))


    Self match notes:


    """

    import numpy as np
    import matplotlib.pyplot as plt

    from astropy.table import Table, hstack
    from astropy.coordinates import SkyCoord
    from astropy.coordinates import search_around_sky, match_coordinates_sky
    from astropy import units as u

    from astropy.stats import mad_std, median_absolute_deviation

    if verbose or debug:
        print('__file__:', __file__)
        print('__name__:', __name__)
    try:
        if 'filename' in table1.meta:
            print('table1.filename:', table1.meta['filename'])
    except (AttributeError, TypeError):
        print("table1 has no metadata or table1.meta['filename']")

    if verbose or debug:
        print('colnames_radec1:', colnames_radec1)
        table1.info()

    # selfmatch does not need a 2nd table
    if not selfmatch:
        try:
            if 'filename' in table2.meta:
                print('table2.filename:', table2.meta['filename'])
        except (AttributeError, TypeError):
            print("table2 has no metadata or table2.meta['filename']")

        if verbose or debug:
            print('colnames_radec2:', colnames_radec2)
            table2.info()

    if selfmatch:
        table2 = table1
        colnames_radec2 = colnames_radec1
        if nthneighbor is None:
            nthneighbor = 2

    if nthneighbor is None:
        nthneighbor = 1

    ra1 = table1[colnames_radec1[0]]
    dec1 = table1[colnames_radec1[1]]
    if verbose or debug:
        print('table1: ', colnames_radec1[0], table1[colnames_radec1[0]].unit)
        print('table1: ', colnames_radec1[1], table1[colnames_radec1[1]].unit)

    ra2 = table2[colnames_radec2[0]]
    dec2 = table2[colnames_radec2[1]]
    if verbose or debug:
        print('table2: ', colnames_radec2[0], table2[colnames_radec2[0]].unit)
        print('table2: ', colnames_radec2[1], table2[colnames_radec2[1]].unit)

    if stats or verbose or debug:
        print('RA1 range:', np.min(ra1), np.max(ra1))
        print('Dec1 range:', np.min(dec1), np.max(dec1))

        print('RA2 range:', np.min(ra2), np.max(ra2))
        print('Dec2 range:', np.min(dec2), np.max(dec2))


    skycoord1 = SkyCoord(ra1, dec1, unit=units_radec1, frame='icrs')
    skycoord2 = SkyCoord(ra2, dec2, unit=units_radec2, frame='icrs')

    # idx is an integer array into the second coordinate array to get the
    # matched points for the second coordinate array.
    # Shape of idx matches the first coordinate array
    idx1 = []
    idx2 = []
    if not method:
        if not multimatch:
            idx2, d2d, d3d = \
                match_coordinates_sky(skycoord1,
                                      skycoord2,
                                      nthneighbor=nthneighbor)
        if multimatch:
            idx1, idx2, d2d, d3d = \
                search_around_sky(skycoord1,
                                  skycoord2,
                                  seplimit * u.arcsec)

    # alternative 'method' form
    if method:
        if not multimatch:
            idx2, d2d, d3d = \
                skycoord1.match_to_catalog_sky(skycoord2,
                                              nthneighbor=nthneighbor)

        if multimatch:
            # note: the method form returns (indices into the argument,
            # indices into self), i.e. the reverse of the functional form
            idx2, idx1, d2d, d3d = \
                skycoord1.search_around_sky(skycoord2,
                                            seplimit * u.arcsec)


    # compute the separations and
    if not multimatch:
        separation = skycoord1.separation(skycoord2[idx2])
        dra, ddec = \
            skycoord1.spherical_offsets_to(skycoord2[idx2])


    if multimatch:
        separation = skycoord1[idx1].separation(skycoord2[idx2])
        dra, ddec = \
            skycoord1[idx1].spherical_offsets_to(skycoord2[idx2])


    if stats or verbose or debug:
        print('multimatch:', multimatch)
        print('seplimit:', seplimit)
        print('len(table1):', len(table1))
        print('len(table2):', len(table2))
        print('len(idx1):', len(idx1))
        print('len(idx2):', len(idx2))
        print('idxmatch range:', np.min(idx2), np.max(idx2))
        print('d2d range:', np.min(d2d), np.max(d2d))
        print('d2d range:', np.min(d2d).arcsec, np.max(d2d).arcsec)
        print('d2d median:', np.median(d2d).arcsec)

        median_separation = np.median(separation).arcsec
        mad_std_separation = mad_std(separation.arcsec)
        print('dR range (arcsec):',
              np.min(separation.arcsec), np.max(separation.arcsec))
        print('dR mean, std (arcsec):',
              np.mean(separation).arcsec, np.std(separation).arcsec)
        print('dR  median, mad_std (arcsec):',
              median_separation, mad_std_separation)
        print()

        median_dra = np.median(dra).arcsec
        mad_std_dra = mad_std(dra.arcsec)
        print('dRA min, max:',
              np.min(dra).arcsec, np.max(dra).arcsec)
        print('dRA mean, std:',
              np.mean(dra).arcsec, np.std(dra).arcsec)
        print('dRA median, mad_std:',
              median_dra, mad_std_dra)
        print()

        median_ddec = np.median(ddec).arcsec
        mad_std_ddec = mad_std(ddec.arcsec)
        print('dDec min, max:',
              np.min(ddec).arcsec, np.max(ddec).arcsec)
        print('dDec mean, std:',
              np.mean(ddec).arcsec, np.std(ddec).arcsec)
        print('dDec median, mad_std:',
              median_ddec, mad_std_ddec)
        print()

    # convert to arcsec for convenience
    separation = separation.arcsec
    dr = d2d.arcsec
    dra = dra.arcsec
    ddec = ddec.arcsec

    # return dra, ddec, dr in arcsec
    # as a list or could be dict; check if scales from 10^3 -> 10^6 -> 10^9
    drplus = [dra, ddec, dr]

    if debug or verbose:
        print(len(idx2), len(dr))
        print(len(drplus), len(drplus[0]), len(drplus[1]), len(drplus[2]))

    # could add option to return dr, dra, ddec
    if not multimatch:
        return idx2, dr, dra, ddec

    if multimatch:
        return (idx1, idx2), dr, dra, ddec
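
A short usage sketch for xmatch_cat, assuming two small astropy Tables with ra/dec in degrees; for each row of table1, idx2 points at its nearest neighbour in table2, and dr/dra/ddec are in arcsec:

from astropy.table import Table

table1 = Table({'ra': [10.0000, 10.0010], 'dec': [-1.0000, -1.0010]})
table2 = Table({'ra': [10.0001, 10.0011], 'dec': [-1.0001, -1.0011]})

idx2, dr, dra, ddec = xmatch_cat(table1=table1, table2=table2,
                                 colnames_radec1=['ra', 'dec'],
                                 colnames_radec2=['ra', 'dec'])
for irow, imatch in enumerate(idx2):
    print('table1 row %d -> table2 row %d: dr = %.3f arcsec'
          % (irow, imatch, dr[irow]))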
Esempio n. 46
0
    if debug:
        help(xmatch_cat)
    print("Elapsed time %.3f seconds" % (time.time() - t0))
    """RA, Dec nearest xmatch for two lists; returns pointers """
    idxmatch, dr, dra, ddec = xmatch_cat(table1=table1,
                              table2=table2,
                              colnames_radec1=colnames_radec1,
                              colnames_radec2=colnames_radec2,
                              selfmatch=False,
                              stats=True,
                              debug=debug,
                              verbose=True)
    print("Elapsed time %.3f seconds" % (time.time() - t0))

    dr_median = np.median(dr)
    dr_mad_std = mad_std(dr)
    numpoints = len(dr)
    print(len(dr), dr_median, dr_mad_std)

    itest = np.unique(idxmatch)
    print('Unique idxmatch:', len(itest), len(idxmatch))

    itest = np.unique(table1['row_id'])
    print('Unique row_id:', len(itest))


    for icount, id in enumerate(itest):
        print(icount + 1, id,
              mastercat['RA'][id], mastercat['Dec'][id])
        precision = 1
        print(icount + 1, id)

Esempio n. 47
0
def make_hist(xdata=None, column=None, units=None, comment=None,
              waveband=None,
              figpath=None,
              infile=None, filename=None, datapath=None,
              zoom=False, save=True):
    """

    make EDA univariate histogram plots

    """

    fig = plt.figure(figsize=(8.0, 8.0))

    # select only finite values: NaN != NaN, so this drops NaNs
    ids = np.where((xdata == xdata))[0]
    xdata = xdata[ids]
    pers = np.percentile(xdata, [1.0, 99.0])
    keeps = np.where((xdata < pers[1]) & (xdata > pers[0]))[0]

    if zoom and len(keeps) > 1:
        xdata1 = xdata[keeps]
        nper = len(xdata1)
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122, sharey=ax1)
        ax2.get_yaxis().set_visible(False)
        ax2.hist(xdata1, bins=100, log=True,
                 range=(min(xdata1), max(xdata1)))
        ax2.set_title("1st - 99th %tile: " + str(nper))
        labels2 = ax2.get_xticks()
        ax2.set_xticklabels(labels2, rotation=270)
    else:
        ax1 = fig.add_subplot(111)

    nr = len(xdata)
    ax1.hist(xdata, bins=100, log=True,
             range=(min(xdata), max(xdata)))
    labels1 = ax1.get_xticks()[:-1]
    ax1.set_xticklabels(labels1, rotation=270)
    text = ("Min: " + str(min(xdata)) + "\nMax: " + str(max(xdata)) +
            "\nMedian: " + str(np.median(xdata)) + "\nSigma MAD: " +
            str(mad_std(xdata)) + "\n1st %ile: " +
            str(pers[0]) + "\n99th %ile: " + str(pers[1]))
    ax1.text(0.2, 0.7, text,
             transform=ax1.transAxes, bbox=dict(facecolor='blue', alpha=0.2))
    ax1.set_title("All points: " + str(nr))
    text = column + " / " + units + "\n" + comment
    ax1.text(0.5, 0.05, text,
             ha="center", transform=fig.transFigure)
    ax1.set_ylabel("Frequency")
    print(column, filename, waveband)
    fig.suptitle(column + ":" + filename + ':' + waveband, fontsize='small')
    plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.2, wspace=0.0)
    fig = plt.gcf()
    fig.set_size_inches(10.0, 8.0)
    plotid()
    if save:
        basename = os.path.basename(infile)
        figfile = figpath + '/' + basename + '_hist_' + column + ".png"
        print('Saving:', figfile)
        plt.savefig(figfile)
        plt.close()
    else:
        plt.show()
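
The NaN filter and the 1st-99th percentile zoom window used above can be shown in isolation; a small sketch on synthetic data:

import numpy as np

rng = np.random.default_rng(3)
xdata = rng.normal(20.0, 0.5, 10000)
xdata[:20] = 99.0                        # a few wild values

xdata = xdata[xdata == xdata]            # NaN != NaN, so this keeps finite points
pers = np.percentile(xdata, [1.0, 99.0])
zoomed = xdata[(xdata > pers[0]) & (xdata < pers[1])]
print('kept %d of %d points between %.2f and %.2f'
      % (len(zoomed), len(xdata), pers[0], pers[1]))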
Esempio n. 48
0
    #Iterate through the hours of one day
    while hour.day == date.day: 
        index = hour.isoformat(" ").split(".")[0]
        hour += datetime.timedelta(hours=1)
        if index not in openfile:
            continue
        for b in openfile[index]:
            block = openfile[index][b]
            avg_revs.append(float(block["avg_revenue"]))
            if block["avg_tips"] != "Insufficient data":
                avg_tips.append(float(block["avg_tips"]))
            avg_dists.append(float(block["avg_distance"]))
            avg_pass.append(float(block["avg_passengers"]))
            dist_list.append(float(block["distance"]))
            tip_list.append(float(block["tips"]))
            rev_list.append(float(block["revenue"]))
            pass_list.append(float(block["passengers"]))
            rides.append(float(block["rides"]))

    date += datetime.timedelta(days=1)

with open("../statistics.csv", "w") as f:
    writer = csv.writer(f)
    writer.writerow(["Statistic", "Revenue", "Tips", "Distance", "Passengers", "Avg Revenue", "Avg Tips", "Avg Distance", "Avg Passengers", "Rides"])
    writer.writerow(["Median", numpy.median(rev_list), numpy.median(tip_list), numpy.median(dist_list), numpy.median(pass_list), numpy.median(avg_revs), numpy.median(avg_tips), numpy.median(avg_dists), numpy.median(avg_pass), numpy.median(rides)])
    writer.writerow(["Mean", numpy.mean(rev_list), numpy.mean(tip_list), numpy.mean(dist_list), numpy.mean(pass_list), numpy.mean(avg_revs), numpy.mean(avg_tips), numpy.mean(avg_dists), numpy.mean(avg_pass), numpy.mean(rides)])
    writer.writerow(["Median Absolute Deviation", stats.mad_std(rev_list), stats.mad_std(tip_list), stats.mad_std(dist_list), stats.mad_std(pass_list), stats.mad_std(avg_revs), stats.mad_std(avg_tips), stats.mad_std(avg_dists), stats.mad_std(avg_pass), stats.mad_std(rides)])
    writer.writerow(["Standard Deviation", numpy.std(rev_list), numpy.std(tip_list), numpy.std(dist_list), numpy.std(pass_list), numpy.std(avg_revs), numpy.std(avg_tips), numpy.std(avg_dists), numpy.std(avg_pass), numpy.std(rides)])

Esempio n. 49
0
#    file.write(print_line)
#    file.close()
#
#    import matplotlib.pylab as plt
#    im2 = image
#    im2[im2<=0]=0.0001
#    plt.imshow(im2, cmap='gray', origin='lower')
#    apertures.plot(color='blue', lw=1.5, alpha=0.5)
#    plt.show()

hdulist = fits.open(inpath+file_name)
image = hdulist[0].data
#image = image.astype(float) - np.median(image)
# the old functional daofind was removed from photutils;
# DAOStarFinder is the current equivalent
from photutils import DAOStarFinder
from astropy.stats import mad_std
bkg_sigma = mad_std(image)
daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold * bkg_sigma)
sources = daofind(image)
#print_line= (file_name+","+str(sources_2)+"\n")
sources_2 = np.array(sources["id", "xcentroid", "ycentroid", "sharpness", "roundness1", "roundness2", "npix", "sky", "peak", "flux", "mag"])
print_line= (file_name+","+str(sources_2))
file= open(outpath, "a")
file.write(print_line)
file.close()

from photutils import aperture_photometry, CircularAperture
positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
apertures = CircularAperture(positions, r=r)
phot_table = aperture_photometry(image, apertures)
phot_table_2 = np.array(phot_table["aperture_sum", "xcenter", "ycenter"])
print_line= (","+str(phot_table_2)+"\n")
file= open(outpath, "a")
def init_centroids(first_image_path, master_flat, master_dark, target_centroid,
                   max_number_stars=10, min_flux=0.2, plots=False):

    first_image = np.median([(fits.getdata(path) - master_dark)/master_flat
                             for path in first_image_path], axis=0)

    tophat_kernel = Tophat2DKernel(5)
    convolution = convolve_fft(first_image, tophat_kernel, fftn=fft2, ifftn=ifft2)

    convolution -= np.median(convolution)

    mad = mad_std(convolution)

    convolution[convolution < -5*mad] = 0.0

    from skimage.filters import threshold_yen
    from skimage.measure import label, regionprops

    thresh = threshold_yen(convolution)/4 # Use /4 for planet c, /2 for planet b
    #thresh = threshold_otsu(convolution)/15

    masked = np.ones_like(convolution)
    masked[convolution <= thresh] = 0

    label_image = label(masked)

    if plots:
        plt.figure()
        plt.imshow(label_image, origin='lower', cmap=plt.cm.viridis)
        plt.show()

    # regions = regionprops(label_image, convolution)
    regions = regionprops(label_image, first_image)

    # reject regions near to edge of detector
    buffer_pixels = 50
    regions = [region for region in regions
               if ((region.weighted_centroid[0] > buffer_pixels and
                   region.weighted_centroid[0] < label_image.shape[0] - buffer_pixels)
               and (region.weighted_centroid[1] > buffer_pixels and
                    region.weighted_centroid[1] < label_image.shape[1] - buffer_pixels))]

    #centroids = [region.weighted_centroid for region in regions]
    #intensities = [region.mean_intensity for region in regions]

    target_intensity = regions[0].mean_intensity
    target_diameter = regions[0].equivalent_diameter
    #  and region.equivalent_diameter > 0.8 * target_diameter
    centroids = [region.weighted_centroid for region in regions
                 if min_flux * target_intensity < region.mean_intensity]
    # intensities = [region.mean_intensity for region in regions
    #                if min_flux * target_intensity < region.mean_intensity]
#    centroids = np.array(centroids)[np.argsort(intensities)[::-1]]

    distances = [np.sqrt((target_centroid[0] - d[0])**2 +
                         (target_centroid[1] - d[1])**2) for d in centroids]

    centroids = np.array(centroids)[np.argsort(distances)]

    positions = np.vstack([[y for x, y in centroids], [x for x, y in centroids]])

    if plots:
        apertures = CircularAperture(positions, r=12.)
        apertures.plot(color='r', lw=2, alpha=1)
        plt.imshow(first_image, vmin=np.percentile(first_image, 0.01),
                   vmax=np.percentile(first_image, 99.9), cmap=plt.cm.viridis,
                   origin='lower')
        plt.scatter(positions[0, 0], positions[1, 0], s=150, marker='x')

        plt.show()
    return positions
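
The segmentation step in init_centroids (threshold, label, regionprops) can be exercised on a synthetic frame, independent of the flat/dark handling; a minimal sketch:

import numpy as np
from skimage.filters import threshold_yen
from skimage.measure import label, regionprops

# synthetic image: Gaussian background plus two bright patches
rng = np.random.default_rng(7)
image = rng.normal(100.0, 5.0, (200, 200))
image[50:54, 60:64] += 500.0
image[150:154, 120:124] += 300.0

mask = image > threshold_yen(image)
regions = regionprops(label(mask), image)
for region in regions:
    print('centroid:', region.weighted_centroid,
          'mean intensity: %.1f' % region.mean_intensity)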
Esempio n. 51
0
def xmatch_checkplots0(ra1, dec1, ra2, dec2,
                      width=10.0,
                      binsize=1.0,
                      saveplot=True,
                      markersize=1.0,
                      plotfile='',
                      suptitle='',
                      **kwargs):

    """
    Based on code by Chris Desira

    """

    import numpy as np

    import matplotlib.pyplot as plt

    from astropy import stats
    from astropy.coordinates import SkyCoord
    from astropy import units as u


    from librgm.plotid import plotid

    rmax = width

    print('RA1 range:', np.min(ra1), np.max(ra1))
    print('Dec1 range:', np.min(dec1), np.max(dec1))
    print('RA2 range:', np.min(ra2), np.max(ra2))
    print('Dec2 range:', np.min(dec2), np.max(dec2))

    # offsets in arc seconds
    difference_ra = (ra1 - ra2) * np.cos(np.radians(dec1)) * 3600.0
    difference_dec = (dec1 - dec2) * 3600.0

    itest = (np.abs(difference_ra) < rmax) & (np.abs(difference_dec) < rmax)

    difference_ra = difference_ra[itest]
    difference_dec = difference_dec[itest]

    skycoord_object1 = SkyCoord(ra1, dec1, unit=('degree', 'degree'),
        frame='icrs')
    skycoord_object2 = SkyCoord(ra2, dec2, unit=('degree', 'degree'),
        frame='icrs')

    skycoord_object1 = skycoord_object1[itest]
    skycoord_object2 = skycoord_object2[itest]

    separations = skycoord_object1.separation(skycoord_object2)

    med = np.median(separations.arcsec)
    ndata = len(separations)
    mad = stats.median_absolute_deviation(separations.arcsec)
    mad_std = stats.mad_std(separations.arcsec)

    fig = plt.figure(1, figsize=(10, 5))

    plt.suptitle(suptitle, size=10)

    ax1=fig.add_subplot(1,2,1)

    xdata = separations.arcsec

    n, b, patches = ax1.hist(xdata, bins=int(rmax / binsize),
                             range=[0.0, rmax],
                             color='green', alpha=0.5)

    bin_min = np.where(n == n.min())


    ax1.locator_params(axis='x', nbins=4)

    s04 = '# = %i'% ndata
    ax1.annotate(s04,(0.28,0.90) , xycoords = 'axes fraction',size=8)

    s01 = 'Median = %.2f' % med
    ax1.annotate(s01,(0.28,0.85) , xycoords = 'axes fraction',size=8)

    ax1.set_xlabel('Pairwise separation (arcsec)')
    ax1.set_ylabel('Frequency per bin')

    ax2 = fig.add_subplot(1,2,2, aspect='equal')

    alpha = 1.0
    ax2.plot(difference_ra,difference_dec,'oc',
             markersize=markersize,
             markeredgewidth=0.0,
             alpha=alpha) #0.5 smallest size

    ax2.axis([-1.0*rmax, rmax,-1.0*rmax, rmax])
    ax2.locator_params(axis='x',nbins=4)
    ax2.set_xlabel('Delta RA')
    ax2.set_ylabel('Delta Dec')
    s11 = 'Self-xmatch'
    ax2.annotate(s11,(0.45,0.95) , xycoords = 'axes fraction',size=8)
    s1 = '# of Objects = %i' % ndata
    ax2.annotate(s1,(0.45,0.90) , xycoords = 'axes fraction',size=8)
    s7 = 'MAD = %.2f' % mad
    ax2.annotate(s7,(0.45,0.85) , xycoords = 'axes fraction',size=8)
    s3 = 'sigma_MAD = %.2f' % mad_std
    ax2.annotate(s3,(0.45,0.80) , xycoords = 'axes fraction',size=8)

    fig.tight_layout()
    ax2.grid()

    fig.subplots_adjust(top=0.88)

    # make room for the plotid on right edge
    fig.subplots_adjust(right=0.95)
    plotid()

    if plotfile:
        print('Saving plotfile:', plotfile)
        plt.savefig(plotfile)

    if ('save' in kwargs):
        path_to_save = str(kwargs['save'])
        plt.savefig(path_to_save, dpi=150)
    else:
        plt.show()

Esempio n. 52
0
def add_columns_spherical_offsets(table=None,
                                  ra1=None, dec1=None,
                                  ra2=None, dec2=None,
                                  colname_suffix=None,
                                  plot_drarange=None,
                                  plot_ddecrange=None,
                                  plots=False,
                                  colnames=None,
                                  verbose=False,
                                  **kwargs):
    """

    input is ra1, dec1, ra2, ra2 in pairwise match order

    http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.add_columns
    http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
    http://docs.astropy.org/en/stable/coordinates/matchsep.html
    http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html#astropy.coordinates.SkyCoord.position_angle

    plots are based on cdesira code

    """


    if verbose:
        if 'filename' in table.meta:
            print('filename:', table.meta['filename'])

        print('ra1 range:', np.min(ra1), np.max(ra1))
        print('dec1 range:', np.min(dec1), np.max(dec1))

        print('ra2 range:', np.min(ra2), np.max(ra2))
        print('dec2 range:', np.min(dec2), np.max(dec2))

    # convert ra, dec to u.deg if the inputs lack units;
    # strictly this is not needed for the astropy matching since
    # astropy supports units
    try:
        if ra1.unit != 'deg':
            ra1 = ra1 * u.deg
    except:
        # ra1.unit = 'deg'
        pass
    try:
        if dec1.unit != 'deg':
            dec1 = dec1 * u.deg
    except:
        pass

    print(type(ra1), len(ra1))
    print(type(dec1), len(dec1))
    c1 = SkyCoord(ra=ra1, dec=dec1)

    if ra2.unit != 'deg':
        ra2 = ra2 * u.deg
    if dec2.unit != 'deg':
        dec2 = dec2 * u.deg
    c2 = SkyCoord(ra=ra2, dec=dec2)

    if verbose:
        print('ra1 range:', np.min(ra1), np.max(ra1))
        print('dec1 range:', np.min(dec1), np.max(dec1))

        print('ra2 range:', np.min(ra2), np.max(ra2))
        print('dec2 range:', np.min(dec2), np.max(dec2))
        print()

    dra, ddec = c1.spherical_offsets_to(c2)
    sep = c1.separation(c2)
    pa = c1.position_angle(c2)

    # compute the statistics
    print('stats: n, min, max, mean, median, std, mad_std')
    sep_mad_std = apstats.mad_std(sep)
    print('sep:', len(sep), np.min(sep.arcsec), np.max(sep.arcsec),
          np.mean(sep.arcsec), np.median(sep.arcsec),
          np.std(sep.arcsec), apstats.mad_std(sep.arcsec))
    print('pa: ', len(pa), np.min(pa.deg), np.max(pa.deg),
          np.mean(pa.deg), np.median(pa.deg),
          np.std(pa.deg), apstats.mad_std(pa.deg))
    print('dra: ', len(dra.arcsec), np.min(dra.arcsec), np.max(dra.arcsec),
          np.mean(dra.arcsec), np.median(dra.arcsec),
          np.std(dra.arcsec), apstats.mad_std(dra.arcsec))
    print('ddec:', len(ddec), np.min(ddec.arcsec), np.max(ddec.arcsec),
          np.mean(ddec.arcsec), np.median(ddec.arcsec),
          np.std(ddec.arcsec), apstats.mad_std(ddec.arcsec))
    print()

    # need to move these outside this function for portability
    if plots:
        # drarange=[-0.5, 0.5]
        # ddecrange=[-0.5, 0.5]
        alpha = 1.0
        markersize = 4.0
        plt.figure(1, figsize=(8.0, 8.0))
        ndata = len(dra)
        plt.plot(dra.arcsec, ddec.arcsec,
                 'oc',
                 markersize=markersize, markeredgewidth=0.0,
                 alpha=alpha, label=str(ndata))
        plt.xlabel('dra (")')
        plt.ylabel('ddec (")')
        if plot_drarange is not None:
            plt.xlim(plot_drarange)
        if plot_ddecrange is not None:
            plt.ylim(plot_ddecrange)
        plt.grid()
        plt.legend()
        plotid()

        dra_mean = np.mean(dra.arcsec)
        dra_median = np.median(dra.arcsec)
        dra_std = np.std(dra.arcsec)
        dra_mad_std = apstats.mad_std(dra.arcsec)

        ddec_mean = np.mean(ddec.arcsec)
        ddec_median = np.median(ddec.arcsec)
        ddec_std = np.std(ddec.arcsec)
        ddec_mad_std = apstats.mad_std(ddec.arcsec)

        s0 = '           dra, ddec'
        plt.annotate(s0,(0.05,0.98) , xycoords = 'axes fraction',size=12)
        s1 = 'mean    = %.3f, %.3f'% (dra_mean, ddec_mean)
        plt.annotate(s1,(0.05,0.94) , xycoords = 'axes fraction',size=12)
        s2 = 'median  = %.3f, %.3f'% (dra_median, ddec_median)
        plt.annotate(s2,(0.05,0.90) , xycoords = 'axes fraction',size=12)
        s3 = 'std     = %.3f, %.3f' % (dra_std, ddec_std)
        plt.annotate(s3,(0.05,0.86) , xycoords = 'axes fraction',size=12)
        s4 = 'mad_std = %.3f, %.3f' % (dra_mad_std, ddec_mad_std)
        plt.annotate(s4,(0.05,0.82) , xycoords = 'axes fraction',size=12)

        if ('plot_title' in kwargs):
            plt.title(str(kwargs['plot_title']), fontsize='medium')
        if ('plot_suptitle' in kwargs):
            plt.suptitle(str(kwargs['plot_suptitle']), fontsize='medium')

        plt.show()

    if colname_suffix:
        colname_suffix = '_' + colname_suffix
    else:
        colname_suffix = ''

    # maybe should be in degrees
    table['dRA' + colname_suffix] = dra.arcsec
    table['dDec' + colname_suffix] = ddec.arcsec
    table['dR' + colname_suffix] = sep.arcsec
    table['PA' + colname_suffix] = pa.deg

    return table
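
The core of the function above is SkyCoord.spherical_offsets_to; a standalone sketch of the offsets, separation, and position angle for a single coordinate pair:

from astropy import units as u
from astropy.coordinates import SkyCoord

c1 = SkyCoord(ra=10.0000 * u.deg, dec=-1.0000 * u.deg)
c2 = SkyCoord(ra=10.0010 * u.deg, dec=-1.0005 * u.deg)

dra, ddec = c1.spherical_offsets_to(c2)   # great-circle offsets
print('dRA  = %.3f arcsec' % dra.arcsec)
print('dDec = %.3f arcsec' % ddec.arcsec)
print('dR   = %.3f arcsec' % c1.separation(c2).arcsec)
print('PA   = %.2f deg' % c1.position_angle(c2).deg)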
Esempio n. 53
0
    def printfc(self,outname,annotate=True,cntr=4.0):

        """

        Generate finder

        outname: name of output
        annotate: if True, add info to finder
        cntr: number of sigma sky for display contrast  

        """


        #display image 
        fig = plt.figure(figsize=(8,10),dpi=80)
        ax = fig.add_subplot(111,projection=self.w)
        median = np.median(self.img)
        stdev = mad_std(self.img)
        imgplot = ax.imshow(self.img, origin='lower', cmap='gray_r',
                            clim=(median - cntr*stdev, median + cntr*stdev))

        
        if(annotate):

            #make annotations
            #mark position obj 
            Ax,Ay = wcs.utils.skycoord_to_pixel(self.obj,self.w)
            p=patches.Circle((Ax,Ay),2.0/self.scale,edgecolor='red',
                             linewidth=3,facecolor='none')
            ax.add_patch(p)
  
            txt=self.obj.to_string(style='hmsdms',sep=':',precision=3)
            plt.text(0.5, 0.95,"Object: "+txt, horizontalalignment='center',
                     verticalalignment='center',transform=fig.transFigure,\
                         fontsize=22,color='red')

            #mark position offset 
            if(self.off):
                Bx,By = wcs.utils.skycoord_to_pixel(self.off,self.w)
                p=patches.Circle((Bx,By),2.0/self.scale,edgecolor='blue',
                                 linewidth=3,facecolor='none')
                ax.add_patch(p)
  
                txt=self.off.to_string(style='hmsdms',sep=':',precision=3)
                plt.text(0.5, 0.9,"Offset: "+txt, horizontalalignment='center',
                         verticalalignment='center',transform=fig.transFigure,
                         fontsize=22,color='blue')
                         
                         
                #offsets
                ra_offset = (((self.off.ra - self.obj.ra) * np.cos(self.obj.dec.to('radian'))).to('arcsec')).value
                dec_offset = (((self.off.dec - self.obj.dec)).to('arcsec')).value
                
                posang=self.obj.position_angle(self.off).degree

                strng=r'$\alpha$:{:8.2f}" $\delta$:{:8.2f}" PA:{:6.1f} m:{:4.1f}'.\
                    format(ra_offset,dec_offset,posang,self.offmag)
                plt.text(0.5, 0.84,strng, horizontalalignment='center',
                         verticalalignment='center',transform=fig.transFigure,
                         fontsize=25)
  
            #mark north east 
            plt.text(self.img.shape[0]/2.,self.img.shape[1],'North',
                     horizontalalignment='center',
                     verticalalignment='top',fontsize=25)
            plt.text(0.,self.img.shape[1]/2.,'East',
                     horizontalalignment='left',rotation='vertical',
                     verticalalignment='center',fontsize=25)
  
        #save
        plt.savefig(outname)
Esempio n. 54
0
def xmatch_groups(table1=None, table2=None,
                  colnames_radec1=['ra', 'dec'],
                  colnames_radec2=['ra', 'dec'],
                  units_radec1=['degree', 'degree'],
                  units_radec2=['degree', 'degree'],
                  selfmatch=False,
                  rmin=None,
                  rmax=10.0,
                  stats=True,
                  debug=False,
                  verbose=False,
                  checkplot=True,
                  join=False,
                  plot_title=None,
                  plotfile_label=''):
    """Group RA, Dec xmatch for two lists; returns pointers

    Topcat

    http://www.star.bris.ac.uk/~mbt/topcat/sun253/sun253.html#matchAlgorithm

    http://www.star.bris.ac.uk/~mbt/topcat/sun253/sun253.html#matchCriteria


    all matchs within a radius

    https://www.sites.google.com/site/mrpaulhancock/blog/theage-oldproblemofcross-matchingastronomicalsources

    http://www.astropy.org/astropy-tutorials/Coordinates.html

    see:
    http://docs.astropy.org/en/stable/_modules/astropy/table/groups.html


    Self match notes:


    """

    import numpy as np
    import matplotlib.pyplot as plt

    from astropy.table import Table, Column, hstack
    from astropy.coordinates import SkyCoord
    from astropy.coordinates import search_around_sky, match_coordinates_sky
    from astropy import units as u
    from astropy.stats import mad_std, median_absolute_deviation

    from librgm.plotid import plotid

    print('__file__:', __file__)
    print('__name__:', __name__)
    print('colnames_radec1:', colnames_radec1)
    print('colnames_radec2:', colnames_radec2)
    print('plotfile_label:', plotfile_label)

    import xmatch_checkplot
    import xmatch_checkplot0

    if selfmatch:
        table2 = table1
        colnames_radec2 = colnames_radec1

    ra1 = table1[colnames_radec1[0]]
    dec1 = table1[colnames_radec1[1]]

    ra2 = table2[colnames_radec2[0]]
    dec2 = table2[colnames_radec2[1]]

    if stats or verbose or debug:
        print('RA1 range:', np.min(ra1), np.max(ra1))
        print('Dec1 range:', np.min(dec1), np.max(dec1))

        print('RA2 range:', np.min(ra2), np.max(ra2))
        print('Dec2 range:', np.min(dec2), np.max(dec2))

    skycoord1 = SkyCoord(ra1, dec1, unit=units_radec1, frame='icrs')
    skycoord2 = SkyCoord(ra2, dec2, unit=units_radec2, frame='icrs')

    """
    idx is an integer array into the first coordinate array to get the
    matched points for the second coordinate array.
    Shape of idx matches the first coordinate array

    http://docs.astropy.org/en/stable/api/astropy.coordinates.search_around_sky.html
    astropy.coordinates.search_around_sky(
        coords1, coords2, seplimit, storekdtree='kdtree_sky'

    Returns:

    idx1 : integer array
           Indices into coords1 that matches to the corresponding element of
           idx2. Shape matches idx2.

    idx2 : integer array
           Indices into coords2 that matches to the corresponding element of
           idx1. Shape matches idx1.
    sep2d : Angle

    The on-sky separation between the coordinates. Shape matches idx1 and idx2.

    """

    idxmatch1, idxmatch2, d2d, d3d = \
        search_around_sky(skycoord1, skycoord2,
                          rmax * u.arcsec)
    if selfmatch:
        itest = idxmatch1 != idxmatch2
        print('selfmatch: Number of matchs within rmax:',
            len(idxmatch1[itest]), len(table1), rmax)
        idxmatch1 = idxmatch1[itest]
        idxmatch2 = idxmatch2[itest]
        d2d = d2d[itest]
        d3d = d3d[itest]


    isort = np.argsort(idxmatch1)
    idxmatch1 = idxmatch1[isort]
    idxmatch2 = idxmatch2[isort]

    separation = skycoord1[idxmatch1].separation(skycoord2[idxmatch2])
    pa = skycoord1[idxmatch1].position_angle(skycoord2[idxmatch2])
    dra, ddec = \
        skycoord1[idxmatch1].spherical_offsets_to(skycoord2[idxmatch2])

    print(len(idxmatch1), np.min(idxmatch1), np.max(idxmatch1))
    idxmatch1_unique, index, counts = np.unique(
        idxmatch1, return_index=True, return_counts=True)
    data = counts

    binwidth = 1
    ndata = np.sum(counts)
    print(len(data), data.shape, np.min(data), np.max(data))
    plt.hist(data, bins=range(min(data), max(data) + binwidth, binwidth),
             label=str(ndata))
    if plot_title is not None:
        plt.title(plot_title)
    plt.xlabel('Group size')
    plt.ylabel('Frequency')
    plt.legend()
    plotid()
    plt.show()

    idxmatch2_unique, index, counts = np.unique(
        idxmatch2, return_index=True, return_counts=True)
    data = counts
    binwidth = 1
    ndata=np.sum(counts)
    print(len(data), np.min(data), np.max(data))
    plt.hist(data,
             bins=range(min(data), max(data) + binwidth, binwidth),
             label=str(ndata))
    if plot_title is not None:
        plt.title(plot_title)
    plt.xlabel('Group size')
    plt.ylabel('Frequency')
    plotid()
    plt.legend()
    plt.show()

    print('table1 columns:', len(table1.colnames))
    print('table2 columns:', len(table2.colnames))

    # result = hstack([table1[idxmatch1], table1[idxmatch2]])
    # print('result columns:', len(result.colnames))
    # nrows = len(result)

    xmatch1 = table1[idxmatch1]
    xmatch2 = table2[idxmatch2]

    nrows = len(xmatch1)
    groupid = np.empty(nrows, dtype=int)
    groupsize = np.zeros(nrows, dtype=int)

    for isource, idxsource in enumerate(idxmatch1):
        if isource == 0:
            igroup = 1
            groupid[isource] = igroup
            if groupsize[isource] == 0:
                groupsize[isource] = 2

        if isource != 0:
            if idxmatch1[isource] == idxmatch1[isource - 1]:
                groupsize[isource] = groupsize[isource - 1] + 1
                groupid[isource] = groupid[isource - 1]

            if idxmatch1[isource] != idxmatch1[isource - 1]:
                groupsize[isource] = 2
                igroup = igroup + 1
                groupid[isource] = igroup

    print('Group size range:', np.min(groupsize), np.max(groupsize))
    for igroupsize in range(np.max(groupsize) + 1):
        itest = (groupsize == igroupsize)
        print('groups:', igroupsize, len(groupsize[itest]))

    # remove simple mirror pairs
    # for isource, source in enumerate(result):
    #    if

    key = input("Enter any key to continue: ")

    id = np.arange(1, nrows + 1)
    print('id range:', np.min(id), np.max(id), len(id))
    # print('id:', len(result), len(id), np.min(id), np.max(id))

    id = Column(id, name='id')
    # result.add_column(id, index=0) # Insert before the first table column
    xmatch1.add_column(id, index=0)

    groupid = Column(groupid, name='groupid')
    # result.add_column(groupid, index=1)
    xmatch1.add_column(groupid, index=1)

    groupsize = Column(groupsize, name='groupsize')
    # result.add_column(groupsize, index=2)
    xmatch1.add_column(groupsize, index=2)

    xmatch1.add_column(Column(separation.arcsec, name='dR_1_2'), index=3)
    # xmatch1['dr_1_2'] = separation.arcsec

    xmatch1.add_column(Column(pa.degree, name='PA_1_2'), index=4)
    # xmatch1['PA_1_2'] = pa.degree

    xmatch1.add_column(Column(dra.arcsec, name='dRA_1_2'), index=5)
    xmatch1.add_column(Column(ddec.arcsec, name='dDec_1_2'), index=6)
    # xmatch1['dRA_1_2'] = dra.arcsec
    # xmatch1['dDec_1_2'] = ddec.arcsec

    xmatch1.info('stats')
    print('Number of rows:', len(xmatch1))
    #result.info('stats')

    # exclude duplicates or small separation objects
    if rmin is not None:
        itest = (xmatch1['dR_1_2'] > rmin)
        xmatch1 = xmatch1[itest]

    xmatch1.write('closepair_groups.fits', overwrite=True)
    # result.write('result_join.fits')

    result = xmatch1

    key = input("Enter any key to continue: ")

    idxmatch2_unique = np.unique(idxmatch2)
    print('Number of unique idxmatch1:', len(idxmatch1_unique))
    print('Number of unique idxmatch2:', len(idxmatch2_unique))

    if stats or verbose or debug:
        print('len(table1):', len(table1))
        print('len(table2):', len(table2))
        print()
        print('len(idxmatch1):', len(idxmatch1))
        print('idxmatch1 range:', np.min(idxmatch1), np.max(idxmatch1))
        print()
        print('len(idxmatch2):', len(idxmatch2))
        print('idxmatch2 range:', np.min(idxmatch2), np.max(idxmatch2))
        print()
        print('d2d range (arcsec):', np.min(d2d).arcsec, np.max(d2d).arcsec)
        print('d2d median (arcsec):', np.median(d2d).arcsec)

        median_separation = np.median(separation).arcsec
        mad_std_separation = mad_std(separation.arcsec)

        print('dR range (arcsec):',
              np.min(separation.arcsec), np.max(separation.arcsec))
        print('dR mean, std (arcsec):',
              np.mean(separation).arcsec, np.std(separation).arcsec)
        print('dR  median, mad_std (arcsec):',
              median_separation, mad_std_separation)
        print()

        median_dra = np.median(dra).arcsec
        mad_std_dra = mad_std(dra.arcsec)
        print('dRA min, max:',
              np.min(dra.arcsec), np.max(dra.arcsec))
        print('dRA mean, std:',
              np.mean(dra.arcsec), np.std(dra.arcsec))
        print('dRA median, mad_std:',
              median_dra, mad_std_dra)
        print()

        median_ddec = np.median(ddec).arcsec
        mad_std_ddec = mad_std(ddec.arcsec)
        print('dDec min, max:',
              np.min(ddec).arcsec, np.max(ddec).arcsec)
        print('dDec mean, std:',
              np.mean(ddec).arcsec, np.std(ddec).arcsec)
        print('dDec median, mad_std:',
              median_ddec, mad_std_ddec)
        print()

    if checkplot:
        suptitle = plotfile_label
        plotfile = 'xmatch_cat' + plotfile_label + '_a_checkplot.png'

        ra2_xmatch = ra2[idxmatch2]
        dec2_xmatch = dec2[idxmatch2]

        xmatch_checkplot.xmatch_checkplot(
            ra1, dec1, ra2_xmatch, dec2_xmatch,
            width=rmax,
            gtype='square',
            saveplot=True,
            plotfile=plotfile,
            suptitle=suptitle)
        plt.close()

        plotfile = 'xmatch_cat' + plotfile_label + '_b_checkplot0.png'
        xmatch_checkplot0.xmatch_checkplot0(
                      ra1, dec1, ra2_xmatch, dec2_xmatch,
                      width=10.0,
                      gtype='square',
                      saveplot=True,
                      plotfile=plotfile,
                      suptitle=suptitle)
        plt.close()

    separation = separation.arcsec
    dr = d2d.arcsec

    print(len(idxmatch1), len(idxmatch2), len(dr))

    return result, idxmatch1, idxmatch2, d2d.arcsec
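
Because the index order of search_around_sky is easy to get wrong, here is a small sketch of the functional form used above: idx1 indexes coords1, idx2 indexes coords2, with one row per pair within seplimit.

from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky

coords1 = SkyCoord([10.0, 10.001, 30.0] * u.deg, [0.0, 0.0, 0.0] * u.deg)
coords2 = SkyCoord([10.0005, 30.0002] * u.deg, [0.0, 0.0] * u.deg)

idx1, idx2, d2d, d3d = search_around_sky(coords1, coords2, 5.0 * u.arcsec)
for i1, i2, sep in zip(idx1, idx2, d2d.arcsec):
    print('coords1[%d] <-> coords2[%d]: %.2f arcsec' % (i1, i2, sep))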
Esempio n. 55
0
    def calc_background_rms(self, data):

        return mad_std(self.sigma_clip(data))

Esempio n. 56
0
def multi_night(sources, unique_nights, night,
                brightest_mag, mags, mag_err,
                uniform_ylim=True):
    """
    Plot magnitude vs time data for several sources over several nights
    """
    number_of_nights = len(unique_nights)

    for source in sources:
        f = plt.figure(figsize=(5 * number_of_nights, 5))

        night_means = []
        night_stds = []
        night_bins = []
        source_mags = mags[source.id - 1]
        if uniform_ylim:
            # Use median to handle outliers.
            source_median = np.median(source_mags[np.isfinite(source_mags)])
            # Use median absolute deviation to get measure of scatter.
            # Helps avoid extreme outliers.
            source_variation = 3 * mad_std(source_mags[np.isfinite(source_mags)])

            # Ensure y range will be at least 0.2 magnitudes
            if source_variation < 0.1:
                half_range = 0.1
            else:
                half_range = source_variation

            y_range = (source_median - half_range, source_median + half_range)
        else:
            # Empty if this option wasn't chosen so that automatic limits will be used.
            y_range = []

        last_axis = None
        for i, this_night in enumerate(unique_nights):
            last_axis = plt.subplot(1, number_of_nights + 1, i + 1,
                                    sharey=last_axis)
            night_mask = (night == this_night)
            night_mean, night_std = plot_magnitudes(mags=mags[source.id - 1][night_mask],
                                                    errors=mag_err[source.id - 1][night_mask],
                                                    times=source.bjd_tdb[night_mask],
                                                    source=source.id, night=this_night,
                                                    ref_mag=brightest_mag,
                                                    y_range=y_range)
            night_means.append(night_mean)
            night_stds.append(night_std)
            night_bins.append(this_night)

        plt.subplot(1, number_of_nights + 1, number_of_nights + 1)

        if uniform_ylim:
            f.subplots_adjust(wspace=0)
            plt.setp([a.get_yticklabels() for a in f.axes[1:]], visible=False)

        # Plot indicators of variation, and information about this source.
        # For simplicity, make the x and y range of this plot be 0 to 1.
        x = np.array([0., 1])
        y = x

        # Add invisible line to make plot.
        plt.plot(x, y, alpha=0, label='source {}'.format(source.id))
        night_means = np.array(night_means)

        # Plot bar proportional to Lomb-Scargle power.
        bad_mags = (np.isnan(mags[source.id - 1]) |
                    np.isinf(mags[source.id - 1]))
        bad_errs = (np.isnan(mag_err[source.id - 1]) |
                    np.isinf(mag_err[source.id - 1]))
        bads = bad_mags | bad_errs
        good_mags = ~bads
        model = LombScargleFast().fit(source.bjd_tdb[good_mags],
                                      mags[source.id - 1][good_mags],
                                      mag_err[source.id - 1][good_mags])
        periods, power = model.periodogram_auto(nyquist_factor=100,
                                                oversampling=3)
        max_pow = power.max()

        # print(source, max_pow)
        if max_pow > 0.5:
            color = 'green'
        elif max_pow > 0.4:
            color = 'cyan'
        else:
            color = 'gray'

        bar_x = 0.25
        plt.plot([bar_x, bar_x], [0, max_pow],
                 color=color, linewidth=10, label='LS power')

        plt.legend()

        # Add dot for magnitude of star.
        size = 10000./np.abs(10**((source_median - brightest_mag)/2.5))
        plt.scatter([0.8], [0.2], c='red', marker='o', s=size)
        plt.ylim(0, 1)
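
The power bar above uses gatspy's LombScargleFast; the same maximum-power number can be obtained with astropy's LombScargle (shown here as an illustrative substitute, on synthetic data):

import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(11)
t = np.sort(rng.uniform(0.0, 30.0, 300))                   # days
y = 0.05 * np.sin(2.0 * np.pi * t / 3.7) + rng.normal(0.0, 0.01, 300)
dy = np.full_like(y, 0.01)

frequency, power = LombScargle(t, y, dy).autopower(nyquist_factor=100)
max_pow = power.max()
print('max Lomb-Scargle power: %.2f' % max_pow)   # compare against the 0.5/0.4 colour thresholds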
Esempio n. 57
0

    """RA, Dec nearest xmatch for two lists; returns pointers """
    print('table1 selfxmatch')
    t0 = time.time()
    idx, dr, dra, ddec = xmatch_cat(table1=table1,
                                    selfmatch=True,
                                    stats=True,
                                    debug=False,
                                    verbose=True)
    print("Elapsed time %.3f seconds" % (time.time() - t0))
    if args.debug:
        input('Type any key to continue> ')
    dr_mean = np.average(dr)
    dr_median = np.median(dr)
    dr_mad_std = mad_std(dr)
    numpoints = len(dr)

    plt.figure(figsize=(12, 4))
    plt.subplot(1, 3, 1)

    """Default is taken from the rcParam hist.bins."""
    prefix = os.path.basename(__file__)
    plt.suptitle(prefix + ': ' + 'dr histogram')
    plot_label = ("npts: {}".format(numpoints) + '\n' +
                  "mean:  {:.2f} arcsec".format(dr_mean) + '\n' +
                  "median:  {:.2f} arcsec".format(dr_median) + '\n' +
                  "mad_std: {:.2f} arcsec".format(dr_mad_std))

    n_bins = 100
    plt.hist(dr, bins=n_bins, fill=False, histtype='step', label=plot_label)
Esempio n. 58
0
def des_get_cutout(infile=None, data=None, ext=1, header=None,
               AstWCS=None,
               position=None, format='pixels', size=100,
               title=None, suptitle=None,
               imagetype='data',
               segmap=False, weightmap=False,
               plot=False, saveplot=True,
               plotfile_suffix=None, plotfile_prefix=None,
               verbose=False, debug=False):
    """



    """

    import numpy as np

    import matplotlib as mpl
    import matplotlib.pyplot as plt

    from astropy.nddata.utils import Cutout2D
    from astropy.io import fits
    from astropy import wcs
    from astropy.stats import mad_std
    from librgm.plotid import plotid


    print('position: ', position[0], position[1])
    position = np.rint(position)
    print('position: ', position[0], position[1])

    if infile is not None:
        hdulist = fits.open(infile)
        hdulist.info()
        AstWCS = wcs.WCS(hdulist[ext].header)
        xpix0 = np.rint(position[0]) - (size/2)
        ypix0 = np.rint(position[1]) - (size/2)
        xpix0 =  xpix0.astype(int)
        ypix0 =  ypix0.astype(int)
        print('xpix0, ypix0: ', xpix0, ypix0)
        xpix1 = xpix0 + size
        ypix1 = ypix0 + size
        data = hdulist[ext].data[ypix0:ypix1, xpix0:xpix1]

    if debug: help(data)
    print('data.shape: ', data.shape)
    median=np.median(data)
    print('median.shape: ', median.shape)
    print('median: ', median)

    if segmap:
        # determine the list of unique sources in the segmentation image
        unique_sources = np.unique(data)
        nsources = len(unique_sources)
        print('Number of unique segmented sources: ', nsources)
        print(unique_sources)
        # skip the first value (zero), which is background,
        # and relabel the remaining sources consecutively from 1
        isource = 0
        for unique_source in unique_sources[1:]:
            isource = isource + 1
            print(isource, unique_source)
            index = np.where(data == unique_source)
            print(index)
            data[index] = isource

    if ext != 2:
        itest = data > 0.5
        print('min: ', np.min(data[itest]))
        threshold = np.min(data[itest]) - 1

        print('threshold: ', threshold)

    print('max: ', np.max(data))

    mad_stdev = mad_std(data)
    print('mad_std:', mad_stdev)

    if ext != 2:
        data = data - threshold
        itest = data < 0
        data[itest] = 0

    median = np.median(data)
    print('median: ', median)

    position = (size/2, size/2)
    cutout = Cutout2D(data, position, size)
    if debug: help(cutout)

    if plot:

        plt.figure(figsize=(8,6))

        cmap = mpl.cm.jet
        if segmap:
            # mask the zero-valued background so it displays as white
            data = np.ma.masked_where(data < 0.5, data)
            cmap.set_bad('w')
            plt.imshow(data, origin='lower', interpolation='nearest',
                       cmap=cmap)

        if not segmap:
            crange = 50
            if weightmap:
                crange = 10
            lower = -1.0
            vmin = median + (lower * mad_stdev)
            # cap the colour stretch at the brightest pixel
            vmax = min(median + (crange * mad_stdev), np.max(data))
            plt.imshow(data, origin='lower', interpolation='nearest',
                       cmap=cmap, vmin=vmin, vmax=vmax)

        plt.gca().invert_xaxis()

        plt.xlabel('pixels')
        plt.ylabel('pixels')
        if title is not None: plt.title(title)
        if suptitle is not None: plt.suptitle(suptitle)
        plt.colorbar()
        plotid()


        if saveplot:
            plotfile = 'cutout'
            if segmap:
                plotfile = 'cutout_segmap'
            if weightmap:
                plotfile = 'cutout_weightmap'

            if plotfile_suffix is not None:
                plotfile = plotfile + '_' + plotfile_suffix

            if plotfile_prefix is not None:
                plotfile = plotfile_prefix + '_' + plotfile

            plotfile = plotfile + '.png'
            print('Saving :', plotfile)
            plt.savefig(plotfile)

        plt.show()

    return cutout.data
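
A hedged usage sketch of the function above; the file name and pixel position are invented for illustration:

# Cut a 100x100 pixel region out of a (hypothetical) DES image and
# display it without saving the figure.
cutout_data = des_get_cutout(infile='image_g.fits', ext=1,
                             position=(1024.0, 2048.0), size=100,
                             title='example cutout',
                             plot=True, saveplot=False, verbose=True)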
Example n. 59
0
import numpy as np
import astropy.units as u
from astropy.stats import mad_std

from toolkit import PCA_light_curve, PhotometryResults
from toolkit.transit_model import params_c, transit_model_c_depth_t0

# discard the first 1000 samples as burn-in
# (alternative source: np.loadtxt('outputs/samples_converged.txt'))
samples = np.load('outputs/samples_c.npy')[1000:, :]
path = 'outputs/trappist1c_20160619.npz'
phot_results = PhotometryResults.load(path)
times = phot_results.times

transit_parameters = params_c
light_curve = PCA_light_curve(phot_results, transit_parameters,
                              validation_duration_fraction=0.5, plots=False,
                              plot_validation=False, validation_time=0.9,
                              outlier_rejection=False)
light_curve_errors = np.ones_like(light_curve) * mad_std(light_curve)

init_transit_model = transit_model_c_depth_t0(times, params_c.rp**2, params_c.t0)

residuals = light_curve - init_transit_model

# iteratively clip until no points lie beyond 2.5 robust sigmas
inliers = np.zeros_like(residuals, dtype=bool)
while np.count_nonzero(~inliers) > 0:
    inliers = np.abs(residuals) < 2.5 * mad_std(residuals)
    print('removing {0} outliers'.format(np.count_nonzero(~inliers)))
    times = times[inliers]
    light_curve = light_curve[inliers]
    light_curve_errors = light_curve_errors[inliers]
    residuals = residuals[inliers]
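
For comparison, the same rejection can be written with astropy's `sigma_clip`, which iterates internally; a sketch, assuming a single clipping pass over the fixed residuals is acceptable:

from astropy.stats import sigma_clip

# Mask residuals beyond 2.5 robust sigmas (mad_std as the scale estimator),
# then keep only the unmasked points in every array.
clipped = sigma_clip(residuals, sigma=2.5, stdfunc=mad_std)
keep = ~clipped.mask
times = times[keep]
light_curve = light_curve[keep]
light_curve_errors = light_curve_errors[keep]
residuals = residuals[keep]
print('removed {0} points'.format(np.count_nonzero(~keep)))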