Example #1
def findStars(image):
    # To use the MAD as a consistent estimator of the standard deviation σ,
    # one takes σ = k * MAD, where k is a constant scale factor that depends
    # on the distribution. For normally distributed data, k ≈ 1.4826.
    bkg_sigma = 1.48 * mad(image)
    t = 5 * bkg_sigma
    daofind = DAOStarFinder(fwhm=3.0, threshold=t)
    stars = daofind(image)

    #stars['signal'] = stars['flux'] * t
    #
    data = image
    mask = make_source_mask(data, snr=2, npixels=5, dilate_size=11)
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
    madstd = mad_std(data)

    snrs = []
    for peak in stars['peak']:
        snrs.append(peak / madstd / 7.4)
    stars['snr'] = snrs
    print((mean, median, std, bkg_sigma, t, madstd))
    #
    #print stars
    return stars
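The comment above applies the normal-distribution scale factor by hand. As a quick sanity check, astropy also ships mad_std, which applies the exact factor k ≈ 1.4826 internally; a minimal sketch on a synthetic Gaussian background (all names below are local to the sketch):

import numpy as np
from astropy.stats import median_absolute_deviation as mad, mad_std

rng = np.random.default_rng(42)
image = rng.normal(loc=0.0, scale=3.0, size=(256, 256))  # pure Gaussian background, sigma = 3

bkg_sigma_approx = 1.48 * mad(image)  # rounded factor, as in findStars above
bkg_sigma_exact = mad_std(image)      # uses k = 1.482602...
print(bkg_sigma_approx, bkg_sigma_exact)  # both should come out close to 3.0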
Example #2
def test_combiner_uncertainty_median_mask():
    mad_to_sigma = 1.482602218505602
    mask = np.zeros((10, 10), dtype=np.bool_)
    mask[5, 5] = True
    ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask)
    ccd_list = [ccd_with_mask,
                CCDData(np.ones((10, 10))*2, unit=u.adu),
                CCDData(np.ones((10, 10))*3, unit=u.adu)]
    c = Combiner(ccd_list)
    ccd = c.median_combine()
    # Just the standard deviation of ccd data.
    ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3])
    # Correction because we combined three images.
    ref_uncertainty /= np.sqrt(3)  # 0.855980789955
    ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254
    np.testing.assert_array_almost_equal(ccd.uncertainty.array,
                                         ref_uncertainty)
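The reference values quoted in the inline comments follow directly from k · MAD / sqrt(N): three images contribute everywhere except the masked pixel, where only two do. A minimal check with the same mad as in the test (astropy's median_absolute_deviation):

import numpy as np
from astropy.stats import median_absolute_deviation as mad

k = 1.482602218505602
print(k * mad([1, 2, 3]) / np.sqrt(3))  # ~0.855980789955 (three images combined)
print(k * mad([2, 3]) / np.sqrt(2))     # ~0.524179041254 (masked pixel: two images)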
Example #3
def fitness(h, twindows, swindows):
    # img_denoise is our image with the denoising filter applied
    img_denoise = cv2.fastNlMeansDenoising(img_noise, None, h, twindows,
                                           swindows)
    # Then we take the filtered image and measure its noise level
    fit = 1 / mad(img_denoise, axis=None)
    # fit is the fitness value of each individual
    return fit
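fitness reads a module-level img_noise array that is not shown in the snippet. A minimal usage sketch, assuming a synthetic 8-bit noisy image (cv2.fastNlMeansDenoising expects uint8 input); the parameter values below are illustrative only:

import cv2
import numpy as np
from astropy.stats import median_absolute_deviation as mad

rng = np.random.default_rng(0)
img_noise = np.clip(rng.normal(128, 25, (64, 64)), 0, 255).astype(np.uint8)

# Higher fitness means a lower residual MAD after denoising; h and the
# template/search window sizes are the candidate individual's genes.
print(fitness(h=10, twindows=7, swindows=21))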
Example #4
def test_combiner_uncertainty_median_mask():
    mad_to_sigma = 1.482602218505602
    mask = np.zeros((10, 10), dtype=np.bool_)
    mask[5, 5] = True
    ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask)
    ccd_list = [ccd_with_mask,
                CCDData(np.ones((10, 10)) * 2, unit=u.adu),
                CCDData(np.ones((10, 10)) * 3, unit=u.adu)]
    c = Combiner(ccd_list)
    ccd = c.median_combine()
    # Just the standard deviation of ccd data.
    ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3])
    # Correction because we combined three images.
    ref_uncertainty /= np.sqrt(3)  # 0.855980789955
    ref_uncertainty[5, 5] = mad_to_sigma * \
        mad([2, 3]) / np.sqrt(2)  # 0.524179041254
    np.testing.assert_array_almost_equal(ccd.uncertainty.array,
                                         ref_uncertainty)
Example #5
def findStars(image):
    # To use the MAD as a consistent estimator of the standard deviation σ,
    # one takes σ = k * MAD, where k is a constant scale factor that depends
    # on the distribution. For normally distributed data, k ≈ 1.4826.
    bkg_sigma = 1.48 * mad(image)
    stars = daofind(image, fwhm=3.0, threshold=5 * bkg_sigma)
    #print stars
    return stars
Example #6
def patch_rlrps(array,
                rank,
                low_rank_mode,
                thresh,
                thresh_mode,
                max_iter,
                random_seed,
                debug=False,
                full_output=False):
    """ Patch decomposition based on GoDec/SSGoDec (Zhou & Tao 2011) """
    ### Initializing L and S
    L = array
    S = np.zeros_like(L)
    random_state = np.random.RandomState(random_seed)
    itr = 0
    power = 0

    while itr <= max_iter:
        ### Updating L
        if low_rank_mode == 'brp':
            Y2 = random_state.randn(L.shape[1], rank)
            for _ in range(power + 1):
                Y1 = np.dot(L, Y2)
                Y2 = np.dot(L.T, Y1)
            Q, _ = qr(Y2, mode='economic')
            Lnew = np.dot(np.dot(L, Q), Q.T)

        elif low_rank_mode == 'svd':
            PC = svd_wrapper(L, 'randsvd', rank, False, False)
            Lnew = np.dot(np.dot(L, PC.T), PC)

        else:
            raise RuntimeError('Wrong Low Rank estimation mode')

        ### Updating S
        T = L - Lnew + S
        if itr == 0:
            threshold = np.sqrt(mad(T.ravel())) * thresh
            if debug: print('threshold level = {:.3f}'.format(threshold))

        S = thresholding(T, threshold, thresh_mode)
        T -= S
        L = Lnew + T
        itr += 1

    if full_output:
        return L, S, array - L - S
    else:
        return S
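patch_rlrps relies on a thresholding helper that is not included in the snippet. A minimal sketch of what such a helper might look like, assuming the usual element-wise soft/hard shrinkage used in GoDec-style decompositions (the mode names 'soft' and 'hard' are assumptions, not taken from the original code):

import numpy as np

def thresholding(array, threshold, mode='soft'):
    # Element-wise shrinkage used to update the sparse term S (assumed helper).
    if mode == 'soft':
        # Soft threshold: shrink magnitudes by `threshold`, zero out the rest.
        return np.sign(array) * np.maximum(np.abs(array) - threshold, 0)
    elif mode == 'hard':
        # Hard threshold: keep only values whose magnitude exceeds `threshold`.
        return np.where(np.abs(array) > threshold, array, 0)
    else:
        raise ValueError('Unknown thresholding mode')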
Example #7
def _snr_approx(array, source_xy, fwhm, centery, centerx):
    """
    array - frame convolved with top hat kernel
    """
    sourcex, sourcey = source_xy
    rad = dist(centery, centerx, sourcey, sourcex)
    ind_aper = disk((sourcey, sourcex), fwhm / 2.)
    # noise : STDDEV in convolved array of 1px wide annulus (while
    # masking the flux aperture) * correction of # of resolution elements
    ind_ann = circle_perimeter(int(centery), int(centerx), int(rad))
    array2 = array.copy()
    array2[ind_aper] = mad(array[ind_ann])  # mask
    n2 = (2 * np.pi * rad) / fwhm - 1
    noise = array2[ind_ann].std(ddof=1) * np.sqrt(1 + (1 / n2))
    # signal : central px minus the mean of the pxs (masked) in 1px annulus
    signal = array[sourcey, sourcex] - array2[ind_ann].mean()
    snr_value = signal / noise
    return sourcey, sourcex, snr_value
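A minimal usage sketch for _snr_approx on a synthetic frame. The imports mirror what the snippet above appears to rely on (skimage.draw and astropy's MAD), and dist is written here as a plain Euclidean-distance stand-in for the helper used above (an assumption):

import numpy as np
from astropy.stats import median_absolute_deviation as mad
from skimage.draw import disk, circle_perimeter

def dist(y0, x0, y1, x1):
    # Euclidean distance between two pixel positions (assumed helper).
    return np.sqrt((y1 - y0) ** 2 + (x1 - x0) ** 2)

rng = np.random.default_rng(1)
frame = rng.normal(0, 1, (101, 101))
frame[60, 70] += 25.0  # injected fake point source at (y=60, x=70)
# _snr_approx expects a frame already convolved with a top-hat kernel; the raw
# frame is used here only to exercise the function.
y, x, snr_value = _snr_approx(frame, (70, 60), fwhm=4.0, centery=50, centerx=50)
print(y, x, snr_value)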
Example #8
def _snr_approx(array, source_xy, fwhm, centery, centerx):
    """
    array - frame convolved with top hat kernel
    """
    sourcex, sourcey = source_xy
    rad = dist(centery, centerx, sourcey, sourcex)
    ind_aper = draw.circle(sourcey, sourcex, fwhm/2.)
    # noise : STDDEV in convolved array of 1px wide annulus (while
    # masking the flux aperture) * correction of # of resolution elements
    ind_ann = draw.circle_perimeter(int(centery), int(centerx), int(rad))
    array2 = array.copy()
    array2[ind_aper] = mad(array[ind_ann])  # mask
    n2 = (2 * np.pi * rad) / fwhm - 1
    noise = array2[ind_ann].std() * np.sqrt(1+(1/n2))
    # signal : central px minus the mean of the pxs (masked) in 1px annulus
    signal = array[sourcey, sourcex] - array2[ind_ann].mean()
    snr_value = signal / noise
    return sourcey, sourcex, snr_value
Example #9
def patch_rlrps(array, rank, low_rank_mode, thresh, thresh_mode, max_iter, 
                random_seed, debug=False, full_output=False):
    """ Patch decomposition based on GoDec/SSGoDec (Zhou & Tao 2011) """           
    ### Initializing L and S
    L = array
    S = np.zeros_like(L)
    random_state = np.random.RandomState(random_seed)
    itr = 0    
    power = 0
    
    while itr<=max_iter:          
        ### Updating L
        if low_rank_mode=='brp':
            Y2 = random_state.randn(L.shape[1], rank)
            for _ in range(power+1):
                Y1 = np.dot(L, Y2)
                Y2 = np.dot(L.T, Y1)
            Q, _ = qr(Y2, mode='economic')    
            Lnew = np.dot(np.dot(L, Q), Q.T)    
        
        elif low_rank_mode=='svd':
            PC = svd_wrapper(L, 'randsvd', rank, False, False)
            Lnew = np.dot(np.dot(L, PC.T), PC)
        
        else:
            raise RuntimeError('Wrong Low Rank estimation mode')

        ### Updating S
        T = L - Lnew + S
        if itr==0:
            threshold = np.sqrt(mad(T.ravel()))*thresh
            if debug:  print('threshold level = {:.3f}'.format(threshold))
                
        S = thresholding(T, threshold, thresh_mode)   
        T -= S
        L = Lnew + T
        itr += 1
    
    if full_output:
        return L, S, array-L-S 
    else:
        return S
Example #10
def __plot_degradation(cat,
                       ax,
                       color='blue',
                       label=None,
                       title=None,
                       ylabel=None,
                       force=True):

    cnames = ['Spirals', 'Ellipticals']
    colors = ['blue', 'red']
    linestyles = ['-', '-']
    for aclass, color, ls, cname in zip([1, 2], colors, linestyles, cnames):
        mus = []
        sigmas = []

        for column in cat.data:
            logging.info(
                np.median(column[np.where(
                    cat.classes == aclass)].compressed().astype(float)))
            #mu, sigma = np.array(norm.fit(column[np.where(cat.classes == aclass)].compressed().astype(float)))
            mus.append(
                np.median(column[np.where(
                    cat.classes == aclass)].compressed().astype(float)))
            sigmas.append(1.5 * mad(column[np.where(
                cat.classes == aclass)].compressed().astype(float)))

        mus = np.array(mus)
        sigmas = np.array(sigmas)
        ax.plot(cat.zs.astype(float),
                mus,
                ls,
                lw=1.5,
                color=color,
                label=cname)
        #ax.plot(cat.zs.astype(float), mus-2*sigmas, '--', lw=1, color=color, label=label)
        #ax.plot(cat.zs.astype(float), mus+2*sigmas, '--', lw=1, color=color, label=label)
        ax.fill_between(cat.zs.astype(float),
                        mus - sigmas,
                        mus + sigmas,
                        facecolor=color,
                        alpha=0.5)
Example #11
def tag_outliers(data, window_size, axis, sigma=10):
    assert window_size % 2 == 0

    data_mad = np.zeros_like(data)
    data_med = np.zeros_like(data)
    #data_tag = np.zeros_like(data).astype(bool)

    pad_width = np.zeros((len(data.shape), 2)).astype(int)
    pad_width[axis] = (window_size // 2, window_size // 2)

    data_pad = np.pad(data, pad_width, mode='constant', constant_values=np.nan)

    # compute rolling MAD and median
    for i in range(data_mad.shape[0]):
        data_mad[i] = mad(data_pad[i:i + window_size], axis=axis, ignore_nan=True)
        data_med[i] = np.nanmedian(data_pad[i:i + window_size], axis=axis)

    # tag
    data_tag = np.abs(data - data_med) / data_mad > sigma * 0.67449
    #     print('Rejecting MAD >', sigma * 0.67449)
    #     print('Total rejected in each IF:', data_tag.sum(0))
    return data_tag
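A minimal usage sketch for tag_outliers, assuming a float array of shape (n_time, n_channel) with the rolling window running along axis 0 (the only case the loop above handles):

import numpy as np
from astropy.stats import median_absolute_deviation as mad

rng = np.random.default_rng(0)
data = rng.normal(0, 1, (200, 4))
data[50, 2] = 100.0  # inject one strong outlier

flags = tag_outliers(data, window_size=20, axis=0, sigma=10)
print(flags[50, 2], flags.sum())  # the injected spike should be flagged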
Example #12
def snrmap(array, fwhm, plot=False, mode='sss', source_mask=None, nproc=None):
    """Parallel implementation of the SNR map generation function. Applies the 
    S/N function (small samples penalty) at each pixel.
    
    Parameters
    ----------
    array : array_like
        Input frame.
    fwhm : float
        Size in pixels of the FWHM.
    plot : {False, True}, bool optional
        If True plots the SNR map. 
    mode : {'sss', 'peakstddev'}, string optional
        'sss' uses the approach with the small sample statistics penalty and
        'peakstddev' uses the peak(aperture)/std(annulus) version.
    source_mask : array_like, optional
        If exists, it takes into account existing sources. The mask is a ones
        2d array, with the same size as the input frame. The centers of the 
        known sources have a zero value.
    nproc : int or None
        Number of processes for parallel computing.
    
    Returns
    -------
    snrmap : array_like
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
        
    """
    start_time = timeInit()
    if not array.ndim==2:
        raise TypeError('Input array is not a 2d array or image.')
    if plot:  plt.close('snr')
        
    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey,sizex)/2 - 1.5*fwhm
    mask = get_annulus(array, (fwhm/2)+1, width)
    mask = np.ma.make_mask(mask)
    yy, xx = np.where(mask)
    coords = zip(xx,yy)
        
    if not nproc:  
        nproc = int((cpu_count()/2))  # Hyper-threading doubles the # of cores
    
    if mode == 'sss':
        F = snr_ss
    elif mode == 'peakstddev':
        F = snr_peakstddev
    else:
        raise TypeError('\nMode not recognized.')
    
    if source_mask is None:
        pool = Pool(processes=int(nproc))                                        
        res = pool.map(eval_func_tuple, itt.izip(itt.repeat(F),itt.repeat(array),
                                                 coords, itt.repeat(fwhm),
                                                 itt.repeat(True)))       
        res = np.array(res)
        pool.close()
        yy = res[:,0]
        xx = res[:,1]
        snr = res[:,2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    else:
        # checking the mask with the sources
        if not array.shape == source_mask.shape:
            raise RuntimeError('Source mask has wrong size.')
        if source_mask[source_mask == 0].shape[0] == 0:
            msg = 'Input source mask is empty.'
            raise RuntimeError(msg)
        if source_mask[source_mask == 0].shape[0] > 20:
            msg = 'Input source mask is too crowded. Check its validity.'
            raise RuntimeError(msg)
        
        soury, sourx = np.where(source_mask == 0)
        sources = []
        ciry = []; cirx = []; anny = []; annx = []
        array_sources = array.copy()
        centery, centerx = frame_center(array)
        for (y,x) in zip(soury,sourx):
            radd = dist(centery, centerx, y, x)
            if int(np.floor(radd)) < centery - np.ceil(fwhm):
                sources.append((y,x))
        
        for source in sources:
            y, x = source        
            radd = dist(centery, centerx, y, x)
            tempay, tempax = get_annulus(array, int(np.floor(radd-fwhm)), 
                                    int(np.ceil(2*fwhm)), output_indices=True)
            tempcy, tempcx = draw.circle(y, x, int(np.ceil(1*fwhm)))
            # masking the source position (using the MAD of pixels in annulus)
            array_sources[tempcy, tempcx] = mad(array[tempay, tempax])
            ciry += list(tempcy); cirx += list(tempcx)
            anny += list(tempay); annx += list(tempax)

        # coordinates of annulus without the sources
        coor_ann = [(y,x) for (y,x) in zip(anny, annx) if (y,x) not in zip(ciry, cirx)]

        # coordinates of the rest of the frame without the annulus
        coor_rest = [(y,x) for (y,x) in zip(yy, xx) if (y,x) not in coor_ann]
        
        pool1 = Pool(processes=int(nproc))
        res = pool1.map(eval_func_tuple, itt.izip(itt.repeat(F),itt.repeat(array),
                                                  coor_rest, itt.repeat(fwhm),
                                                  itt.repeat(True)))       
        res = np.array(res)
        pool1.close()
        yy = res[:,0]
        xx = res[:,1]
        snr = res[:,2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
        
        pool2 = Pool(processes=int(nproc))
        res = pool2.map(eval_func_tuple, itt.izip(itt.repeat(F),
                                                  itt.repeat(array_sources),
                                                  coor_ann, itt.repeat(fwhm),
                                                  itt.repeat(True)))       
        res = np.array(res)
        pool2.close()
        yy = res[:,0]
        xx = res[:,1]
        snr = res[:,2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    
    if plot:  pp_subplots(snrmap, colorb=True, title='S/N map')
        
    print "S/N map created using {:} processes.".format(nproc)
    timing(start_time)
    return snrmap
Example #13
def fit_sources(image1d, psfbase, shape, normperim, medianim, mastermask,
                threshold=12, **kwargs):
    '''find and fit sources in the image

    perform PSF subtraction and then find and fit sources
    see comments in code for details

    Parameters
    ----------
    image1d : ndarray
        flattened, normalized image
    psfbase : ndarray
       2d array of psf templates (PSF library)
    threshold : float
        Detection threshold. Higher numbers find only the stronger sources.
        Experiment to find the right value.
    kwargs : dict or named arguments
        arguments for daofind (fwhm, min and max roundness, etc.)

    Returns
    -------
    fluxes_gaussian : astropy.table.Table
    imag :
        PSF subtracted image
    scaled_im :
        PSF subtracted image in daofind scaling

    '''
    psf_coeff = psf_from_projection(image1d, psfbase)
    im = image1d - np.dot(psfbase, psf_coeff)
    bkg_sigma = 1.48 * mad(im)

    # Do source detection on 2d, scaled image
    scaled_im = remove_normmask(im.reshape((-1, 1)), np.ones(1), np.ones_like(medianim), mastermask).reshape(shape)
    imag = remove_normmask(im.reshape((-1, 1)), normperim, medianim, mastermask).reshape(shape)
    sources = photutils.daofind(scaled_im, threshold=threshold * bkg_sigma, **kwargs)

    if len(sources) == 0:
        return None, imag, scaled_im
    else:
        # insert extra step here to find the brightest source, subtract it and
        # redo the PSF fit or add a PSF model to psfbase to improve the PSF fit
        # I think 1 level of that is enough, no infinite recursion.
        # Idea 1: mask out a region around the source, so that this does not
        #         influence the PSF fit.
        newmask = deepcopy(mastermask).reshape(shape)
        for source in sources:
            sl, temp = overlap_slices(shape, [9,9], [source['xcentroid'], source['ycentroid']])
            newmask[sl[0], sl[1]] = True
        newmask = newmask.flatten()

        psf_coeff = psf_from_projection(image1d[~(newmask[~mastermask])],
                                        psfbase[~(newmask[~mastermask]), :])
        im = image1d - np.dot(psfbase, psf_coeff)
        scaled_im = remove_normmask(im.reshape((-1, 1)), np.ones(1), np.ones_like(medianim), mastermask).reshape(shape)

        imag = remove_normmask(im.reshape((-1, 1)), normperim, medianim, mastermask).reshape(shape)
        # cosmics in the image lead to high points, which means that the
        # average area will be overcorrected
        imag = imag - np.ma.median(imag)
        # do photometry on image in real space

        psf_gaussian = photutils.psf.GaussianPSF(1.8)  # width measured by hand
        # default in photutils is to freeze this stuff, but I disagree
        # psf_gaussian.fixed['sigma'] = False
        # psf_gaussian.fixed['x_0'] = False
        # psf_gaussian.fixed['y_0'] = False
        fluxes_gaussian = photutils.psf.psf_photometry(imag, sources['xcentroid', 'ycentroid'], psf_gaussian)

        '''Estimate flux of Gaussian PSF from A and sigma.

        Should be part of photutils in a more clever (analytic) implementation.
        As long as it's missing there, this crutch is used here.
        '''
        x, y = np.mgrid[-3:3, -4:4]
        amp2flux = np.sum(psf_gaussian.evaluate(x, y, 1, 1, 0, 1.8))  # 1.8 hard-coded above
        fluxes_gaussian.add_column(MaskedColumn(name='flux_fit', data=amp2flux * fluxes_gaussian['amplitude_fit']))

        return fluxes_gaussian, imag, scaled_im
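The amp2flux crutch above sums the Gaussian model on a small 6×8 grid. Analytically, a circular 2D Gaussian of amplitude A and width σ integrates to 2πAσ², so the conversion can be computed directly; note that the truncated grid used in the code underestimates this value somewhat. A minimal sketch with the hand-measured width from above:

import numpy as np

sigma = 1.8  # PSF width hard-coded in fit_sources above
amp2flux_analytic = 2 * np.pi * sigma ** 2  # flux of a unit-amplitude 2D Gaussian
print(amp2flux_analytic)  # ~20.36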
Example #14
fig = plt.figure(figsize=(4.5, 3.5))
ax = fig.add_subplot(111)
idx = np.isfinite(np.log10((t['13co10']) / t['12co32']))
t2 = t[idx]
corner.hist2d(np.log10(t[idx]['flux']),
              np.log10((t[idx]['13co10']) / t[idx]['12co32']),
              ax=ax)

# ax.scatter(t['flux'], t['13co10']/t['12co32'], edgecolor='k',
#            facecolor='gray', alpha=0.2)
ax.set_ylim(-3, 0)
ax.set_xlabel(
    r'$\log_{10}[F_{\mathrm{CO(3-2)}}/(\mathrm{K\ km\ s^{-1}\ pc^{2}})]$')
ax.set_ylabel(r'$\log_{10}[F_{\mathrm{{}^{13}CO(1-0)}}/F_{\mathrm{CO(3-2)}}]$')
plt.tight_layout()
#ax.set_xscale("log", nonposx='clip')
#ax.set_yscale("log", nonposy='clip')

idx2 = t2['flux'] > 1e3
print(np.median(t2[idx2]['13co10'] / t2[idx2]['12co32']))
print(mad(np.log(t2[idx2]['13co10'] / t2[idx2]['12co32'])))
count = idx2.sum()
print('Number of clouds: {0}'.format(count))

plt.savefig('R13.pdf')
# sns.jointplot(np.log10(t[idx]['flux']),
#               np.log10((t[idx]['13co10'])/t[idx]['12co32']),
#               kind="hex")
# plt.ylim(-3,1)
Example #15
def getThreshold(path, index=0):
    fp = pyfits.open(path)
    hdu = fp[index]
    image = hdu.data.astype('float32')
    bkg_sigma = 1.48 * mad(image)
    return 5 * bkg_sigma
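A usage sketch for getThreshold, assuming pyfits refers to astropy.io.fits (or the legacy pyfits package) in the snippet's module, and feeding the result into photutils' DAOStarFinder; 'field.fits' is a placeholder path:

import numpy as np
from astropy.io import fits
from astropy.stats import median_absolute_deviation as mad
from photutils.detection import DAOStarFinder

threshold = getThreshold('field.fits', index=0)              # placeholder file
image = fits.getdata('field.fits', ext=0).astype('float32')
finder = DAOStarFinder(fwhm=4.0, threshold=threshold)
sources = finder(image - np.median(image))
print(sources)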
Example #16
def snrmap(array, fwhm, plot=False, mode='sss', source_mask=None, nproc=None,
           save_plot=None, plot_title=None, verbose=True, array2=None, 
           use2alone=False):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.

    Parameters
    ----------
    array : 2d array_like
        Input frame.
    fwhm : float
        Size in pixels of the FWHM.
    plot : bool, optional
        If True plots the S/N map. False by default.
    mode : {'sss', 'peakstddev'}, string optional
        'sss' uses the approach with the small sample statistics penalty and
        'peakstddev' uses the peak(aperture)/std(annulus) version.
    source_mask : array_like, optional
        If exists, it takes into account existing sources. The mask is a ones
        2d array, with the same size as the input frame. The centers of the
        known sources have a zero value.
    nproc : int or None
        Number of processes for parallel computing.
    save_plot : string
        If provided, the S/N map is saved to this path.
    plot_title : string
        If provided, the S/N map plot is titled.
    verbose: bool, optional
        Whether to print timing or not.
    array2 : array_like, 2d, opt
        Additional image (e.g. processed image with negative derotation angles)
        that provides more noise samples. Should have the same dimensions as
        array.
    use2alone: bool, opt
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features)

    Returns
    -------
    snrmap : 2d array_like
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
    """
    if verbose:
        start_time = time_ini()
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array or image.')
    if plot:
        plt.close('snr')

    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2        # Hyper-threading doubles the # of cores

    if mode == 'sss':
        func = snr_ss
    elif mode == 'peakstddev':
        func = snr_peakstddev
    else:
        raise TypeError('\nMode not recognized.')

    if source_mask is None:
        pool = Pool(processes=nproc)
        res = pool.map(EFT, zip(itt.repeat(func), itt.repeat(array), coords,
                                itt.repeat(fwhm), itt.repeat(True),
                                itt.repeat(array2), itt.repeat(use2alone)))
        res = np.array(res)
        pool.close()
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    else:
        # checking the mask with the sources
        if array.shape != source_mask.shape:
            raise RuntimeError('Source mask has wrong size.')
        if source_mask[source_mask == 0].shape[0] == 0:
            msg = 'Input source mask is empty.'
            raise RuntimeError(msg)
        if source_mask[source_mask == 0].shape[0] > 20:
            msg = 'Input source mask is too crowded. Check its validity.'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 0)
        sources = []
        ciry = []
        cirx = []
        anny = []
        annx = []
        array_sources = array.copy()
        centery, centerx = frame_center(array)
        for (y, x) in zip(soury, sourx):
            radd = dist(centery, centerx, y, x)
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, y, x)
            tempay, tempax = get_annulus_segments(array, int(radd-fwhm),
                                                  int(np.ceil(2*fwhm)))[0]
            tempcy, tempcx = draw.circle(y, x, int(np.ceil(1*fwhm)))
            # masking the source position (using the MAD of pixels in annulus)
            array_sources[tempcy, tempcx] = mad(array[tempay, tempax])
            ciry += list(tempcy)
            cirx += list(tempcx)
            anny += list(tempay)
            annx += list(tempax)

        # coordinates of annulus without the sources
        coor_ann = [(y, x) for (y, x) in zip(anny, annx)
                    if (y, x) not in zip(ciry, cirx)]

        # coordinates of the rest of the frame without the annulus
        coor_rest = [(y, x) for (y, x) in zip(yy, xx) if (y, x) not in coor_ann]

        pool1 = Pool(processes=nproc)
        res = pool1.map(EFT, zip(itt.repeat(func), itt.repeat(array), coor_rest,
                                 itt.repeat(fwhm), itt.repeat(True),
                                 itt.repeat(array2), itt.repeat(use2alone)))
        res = np.array(res)
        pool1.close()
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

        pool2 = Pool(processes=nproc)
        res = pool2.map(EFT, zip(itt.repeat(func), itt.repeat(array_sources),
                                 coor_ann, itt.repeat(fwhm), itt.repeat(True),
                                 itt.repeat(array2), itt.repeat(use2alone)))
        res = np.array(res)
        pool2.close()
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

    if plot:
        pp_subplots(snrmap, colorb=True, title='S/N map')

    # Option to save snrmap in angular scale, using Keck NIRC2's ~0.01 pixel
    # scale. In this case, set plot = False
    elif save_plot is not None:
        pp_subplots(snrmap, colorb=True, title=plot_title, save=save_plot,
                    vmin=-1, vmax=5, angscale=True, getfig=True)

    if verbose:
        print("S/N map created using {} processes.".format(nproc))
        timing(start_time)
    return snrmap
Example #17
import sys
import astropy
import matplotlib.pyplot
import numpy as np
from photutils import datasets

print("Python version:", sys.version)
print("Astropy version:", astropy.__version__)

hdu = datasets.load_star_image()   
image = hdu.data[500:700, 500:700]   
image = hdu.data
print(np.median(image))
image -= np.median(image)


from photutils import daofind
from astropy.stats import median_absolute_deviation as mad
bkg_sigma = 1.48 * mad(image)   
sources = daofind(image, fwhm=4.0, threshold=3*bkg_sigma)   
print(sources)

for s in sources:
	print(s)

figure = matplotlib.pyplot.figure(figsize=(10, 10))
matplotlib.pyplot.title("Sample image")
matplotlib.pyplot.imshow(image, cmap='gray')
#matplotlib.pyplot.gca().invert_yaxis()	
matplotlib.pyplot.show()

ax = figure.add_subplot(1, 1, 1)
matplotlib.pyplot.axis('off')
Example #18
		timeLeft = etaTime - currentTime
		(hours, mins, secs) = ultracamutils.timedeltaHoursMinsSeconds(timeLeft)
		timeLeftString = str(hours).zfill(2) + ":" + str(mins).zfill(2) + ":" + str(secs).zfill(2)
		
		ccdFrame = rdat()
		
		statusString = "\r%s Frame: [%d/%d]"%(timeLeftString, trueFrameNumber, frameRange)
		sys.stdout.write(statusString)
		sys.stdout.flush()
		
		windows = ccdFrame[0]
		
		for windowIndex, w in enumerate(windows):
			image = w._data
			allWindows[windowIndex].addData(image)
			bkg_sigma = 1.48 * mad(image)
			sources = daofind(image, fwhm=4.0, threshold=2.5*bkg_sigma)  
			sources.pprint()
			
			filteredSources = []
				
			
			for index, s in enumerate(sources):
				newSource = {}
				new = True
				newSource = (s['xcentroid'], s['ycentroid'], s['flux'])
				if index==0:
					filteredSources.append(newSource)
				for f in filteredSources:
					if  f[0]==newSource[0] and \
						f[1]==newSource[1] and \
Example #19
def snrmap(array,
           fwhm,
           plot=False,
           mode='sss',
           source_mask=None,
           nproc=None,
           array2=None,
           use2alone=False,
           verbose=True,
           **kwargs):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.

    Parameters
    ----------
    array : numpy.ndarray
        Input frame (2d array).
    fwhm : float
        Size in pixels of the FWHM.
    plot : bool, optional
        If True plots the S/N map. False by default.
    mode : {'sss', 'peakstddev'}, string optional
        'sss' uses the approach with the small sample statistics penalty and
        'peakstddev' uses the peak(aperture)/std(annulus) version.
    source_mask : array_like, optional
        If exists, it takes into account existing sources. The mask is a ones
        2d array, with the same size as the input frame. The centers of the
        known sources have a zero value.
    nproc : int or None
        Number of processes for parallel computing.
    array2 : numpy.ndarray, optional
        Additional image (e.g. processed image with negative derotation angles)
        that provides more noise samples. Should have the same dimensions as
        array.
    use2alone: bool, optional
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features).
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : 2d array_like
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
    """
    if verbose:
        start_time = time_ini()
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array or image.')
    if plot:
        plt.close('snr')

    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2  # Hyper-threading doubles the # of cores

    if mode == 'sss':
        func = snr_ss
    elif mode == 'peakstddev':
        func = snr_peakstddev
    else:
        raise TypeError('\nMode not recognized.')

    if source_mask is None:
        res = pool_map(nproc, func, array, iterable(coords), fwhm, True,
                       array2, use2alone)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    else:
        # checking the mask with the sources
        if array.shape != source_mask.shape:
            raise RuntimeError('Source mask has wrong size.')
        if source_mask[source_mask == 0].shape[0] == 0:
            msg = 'Input source mask is empty.'
            raise RuntimeError(msg)
        if source_mask[source_mask == 0].shape[0] > 20:
            msg = 'Input source mask is too crowded. Check its validity.'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 0)
        sources = []
        ciry = []
        cirx = []
        anny = []
        annx = []
        array_sources = array.copy()
        centery, centerx = frame_center(array)
        for (y, x) in zip(soury, sourx):
            radd = dist(centery, centerx, y, x)
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, y, x)
            tempay, tempax = get_annulus_segments(array, int(radd - fwhm),
                                                  int(np.ceil(2 * fwhm)))[0]
            tempcy, tempcx = draw.circle(y, x, int(np.ceil(1 * fwhm)))
            # masking the source position (using the MAD of pixels in annulus)
            array_sources[tempcy, tempcx] = mad(array[tempay, tempax])
            ciry += list(tempcy)
            cirx += list(tempcx)
            anny += list(tempay)
            annx += list(tempax)

        # coordinates of annulus without the sources
        coor_ann = [(y, x) for (y, x) in zip(anny, annx)
                    if (y, x) not in zip(ciry, cirx)]

        # coordinates of the rest of the frame without the annulus
        coor_rest = [(y, x) for (y, x) in zip(yy, xx)
                     if (y, x) not in coor_ann]

        res = pool_map(nproc, func, array, iterable(coor_rest), fwhm, True,
                       array2, use2alone)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

        res = pool_map(nproc, func, array_sources, iterable(coor_ann), fwhm,
                       True, array2, use2alone)
        res = np.array(res)
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

    if plot:
        plot_frames(snrmap, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes.".format(nproc))
        timing(start_time)
    return snrmap
Example #20
fig = plt.figure(figsize=(4.5,3.5))
ax = fig.add_subplot(111)
idx = np.isfinite(np.log10((t['13co10'])/t['12co32']))
t2 = t[idx]
corner.hist2d(np.log10(t[idx]['flux']),
              np.log10((t[idx]['13co10'])/t[idx]['12co32']),
              ax=ax)

# ax.scatter(t['flux'], t['13co10']/t['12co32'], edgecolor='k',
#            facecolor='gray', alpha=0.2)
ax.set_ylim(-3,0)
ax.set_xlabel(r'$\log_{10}[F_{\mathrm{CO(3-2)}}/(\mathrm{K\ km\ s^{-1}\ pc^{2}})]$')
ax.set_ylabel(r'$\log_{10}[F_{\mathrm{{}^{13}CO(1-0)}}/F_{\mathrm{CO(3-2)}}]$')
plt.tight_layout()
#ax.set_xscale("log", nonposx='clip')
#ax.set_yscale("log", nonposy='clip')

idx2 = t2['flux']>1e3
print(np.median(t2[idx2]['13co10']/t2[idx2]['12co32']))
print(mad(np.log(t2[idx2]['13co10']/t2[idx2]['12co32'])))
count = idx2.sum()
print('Number of clouds: {0}'.format(count))


plt.savefig('R13.pdf')
# sns.jointplot(np.log10(t[idx]['flux']),
#               np.log10((t[idx]['13co10'])/t[idx]['12co32']),
#               kind="hex")
# plt.ylim(-3,1)

Example #21
def snrmap(array,
           fwhm,
           plot=False,
           mode='sss',
           source_mask=None,
           nproc=None,
           save_plot=None,
           plot_title=None,
           verbose=True):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.
    
    Parameters
    ----------
    array : array_like
        Input frame.
    fwhm : float
        Size in pixels of the FWHM.
    plot : bool, optional
        If True plots the S/N map. False by default.
    mode : {'sss', 'peakstddev'}, string optional
        'sss' uses the approach with the small sample statistics penalty and
        'peakstddev' uses the peak(aperture)/std(annulus) version.
    source_mask : array_like, optional
        If exists, it takes into account existing sources. The mask is a ones
        2d array, with the same size as the input frame. The centers of the 
        known sources have a zero value.
    nproc : int or None
        Number of processes for parallel computing.
    save_plot : string
        If provided, the S/N map is saved to this path.
    plot_title : string
        If provided, the S/N map plot is titled.
    verbose: bool, optional
        Whether to print timing or not.
    
    Returns
    -------
    snrmap : array_like
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
    """
    if verbose:
        start_time = time_ini()
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array or image.')
    if plot: plt.close('snr')

    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus(array, (fwhm / 2) + 1, width)
    mask = np.ma.make_mask(mask)
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2  # Hyper-threading doubles the # of cores

    if mode == 'sss':
        func = snr_ss
    elif mode == 'peakstddev':
        func = snr_peakstddev
    else:
        raise TypeError('\nMode not recognized.')

    if source_mask is None:
        pool = Pool(processes=nproc)
        res = pool.map(
            EFT,
            zip(itt.repeat(func), itt.repeat(array), coords, itt.repeat(fwhm),
                itt.repeat(True)))
        res = np.array(res)
        pool.close()
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    else:
        # checking the mask with the sources
        if array.shape != source_mask.shape:
            raise RuntimeError('Source mask has wrong size.')
        if source_mask[source_mask == 0].shape[0] == 0:
            msg = 'Input source mask is empty.'
            raise RuntimeError(msg)
        if source_mask[source_mask == 0].shape[0] > 20:
            msg = 'Input source mask is too crowded. Check its validity.'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 0)
        sources = []
        ciry = []
        cirx = []
        anny = []
        annx = []
        array_sources = array.copy()
        centery, centerx = frame_center(array)
        for (y, x) in zip(soury, sourx):
            radd = dist(centery, centerx, y, x)
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, y, x)
            tempay, tempax = get_annulus(array,
                                         int(radd - fwhm),
                                         int(np.ceil(2 * fwhm)),
                                         output_indices=True)
            tempcy, tempcx = draw.circle(y, x, int(np.ceil(1 * fwhm)))
            # masking the source position (using the MAD of pixels in annulus)
            array_sources[tempcy, tempcx] = mad(array[tempay, tempax])
            ciry += list(tempcy)
            cirx += list(tempcx)
            anny += list(tempay)
            annx += list(tempax)

        # coordinates of annulus without the sources
        coor_ann = [(y, x) for (y, x) in zip(anny, annx)
                    if (y, x) not in zip(ciry, cirx)]

        # coordinates of the rest of the frame without the annulus
        coor_rest = [(y, x) for (y, x) in zip(yy, xx)
                     if (y, x) not in coor_ann]

        pool1 = Pool(processes=nproc)
        res = pool1.map(
            EFT,
            zip(itt.repeat(func), itt.repeat(array), coor_rest,
                itt.repeat(fwhm), itt.repeat(True)))
        res = np.array(res)
        pool1.close()
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

        pool2 = Pool(processes=nproc)
        res = pool2.map(
            EFT,
            zip(itt.repeat(func), itt.repeat(array_sources), coor_ann,
                itt.repeat(fwhm), itt.repeat(True)))
        res = np.array(res)
        pool2.close()
        yy = res[:, 0]
        xx = res[:, 1]
        snr = res[:, 2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr

    if plot:
        pp_subplots(snrmap, colorb=True, title='S/N map')

    # Option to save snrmap in angular scale, using Keck NIRC2's ~0.01 pixel
    # scale. In this case, set plot = False
    elif save_plot is not None:
        pp_subplots(snrmap,
                    colorb=True,
                    title=plot_title,
                    save=save_plot,
                    vmin=-1,
                    vmax=5,
                    angscale=True,
                    getfig=True)

    if verbose:
        print("S/N map created using {} processes.".format(nproc))
        timing(start_time)
    return snrmap
Example #22
def snrmap(array,
           fwhm,
           approximated=False,
           plot=False,
           known_sources=None,
           nproc=None,
           array2=None,
           use2alone=False,
           exclude_negative_lobes=False,
           verbose=True,
           **kwargs):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.
    
    The S/N is computed as in Mawet et al. (2014) for each radial separation.    
    https://ui.adsabs.harvard.edu/abs/2014ApJ...792...97M/abstract
    
    *** DISCLAIMER ***
    Signal-to-noise ratio is not significance! For a conversion from snr to 
    n-sigma (i.e. the equivalent confidence level of a Gaussian n-sigma), use 
    the significance() function.    
    
    
    Parameters
    ----------
    array : numpy ndarray
        Input frame (2d array).
    fwhm : float
        Size in pixels of the FWHM.
    approximated : bool, optional
        If True, an approximated S/N map is generated.
    plot : bool, optional
        If True plots the S/N map. False by default.
    known_sources : None, tuple or tuple of tuples, optional
        To take into account existing sources. It should be a tuple of float/int
        or a tuple of tuples (of float/int) with the coordinate(s) of the known
        sources.
    nproc : int or None
        Number of processes for parallel computing.
    array2 : numpy ndarray, optional
        Additional image (e.g. processed image with negative derotation angles)
        that provides more noise samples. Should have the same dimensions as
        array.
    use2alone: bool, optional
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features).
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : 2d numpy ndarray
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
    """
    if verbose:
        start_time = time_ini()

    check_array(array, dim=2, msg='array')
    sizey, sizex = array.shape
    snrmap_array = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2  # Hyper-threading doubles the # of cores

    if known_sources is None:

        # proxy to S/N calculation
        if approximated:
            cy, cx = frame_center(array)
            tophat_kernel = Tophat2DKernel(fwhm / 2)
            array = convolve(array, tophat_kernel)
            width = min(sizey, sizex) / 2 - 1.5 * fwhm
            mask = get_annulus_segments(array, (fwhm / 2) + 1,
                                        width - 1,
                                        mode="mask")[0]
            mask = np.ma.make_mask(mask)
            yy, xx = np.where(mask)
            coords = [(int(x), int(y)) for (x, y) in zip(xx, yy)]
            res = pool_map(nproc, _snr_approx, array, iterable(coords), fwhm,
                           cy, cx)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, 2]
            snrmap_array[yy.astype(int), xx.astype(int)] = snr_value

        # computing s/n map with Mawet+14 definition
        else:
            res = pool_map(nproc, snr, array, iterable(coords), fwhm, True,
                           array2, use2alone, exclude_negative_lobes)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, -1]
            snrmap_array[yy.astype('int'), xx.astype('int')] = snr_value

    # masking known sources
    else:
        if not isinstance(known_sources, tuple):
            raise TypeError("`known_sources` must be a tuple or tuple of "
                            "tuples")
        else:
            source_mask = np.zeros_like(array)
            if isinstance(known_sources[0], tuple):
                for coor in known_sources:
                    source_mask[coor[::-1]] = 1
            elif isinstance(known_sources[0], int):
                source_mask[known_sources[1], known_sources[0]] = 1
            else:
                raise TypeError("`known_sources` seems to have wrong type. It "
                                "must be a tuple of ints or tuple of tuples "
                                "(of ints)")

        # checking the mask with the sources
        if source_mask[source_mask == 1].shape[0] > 50:
            msg = 'Input source mask is too crowded (check its validity)'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 1)
        sources = []
        coor_ann = []
        arr_masked_sources = array.copy()
        centery, centerx = frame_center(array)
        for y, x in zip(soury, sourx):
            radd = dist(centery, centerx, int(y), int(x))
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, int(y), int(x))
            anny, annx = get_annulus_segments(array, int(radd - fwhm),
                                              int(np.round(3 * fwhm)))[0]

            ciry, cirx = disk((y, x), int(np.ceil(fwhm)))
            # masking the sources positions (using the MAD of pixels in annulus)
            arr_masked_sources[ciry, cirx] = mad(array[anny, annx])

            # S/Ns of annulus without the sources
            coor_ann = [(x, y) for (x, y) in zip(annx, anny)
                        if (x, y) not in zip(cirx, ciry)]
            res = pool_map(nproc, snr, arr_masked_sources, iterable(coor_ann),
                           fwhm, True, array2, use2alone,
                           exclude_negative_lobes)
            res = np.array(res)
            yy_res = res[:, 0]
            xx_res = res[:, 1]
            snr_value = res[:, 4]
            snrmap_array[yy_res.astype('int'),
                         xx_res.astype('int')] = snr_value
            coor_ann += coor_ann

        # S/Ns of the rest of the frame without the annulus
        coor_rest = [(x, y) for (x, y) in zip(xx, yy)
                     if (x, y) not in coor_ann]
        res = pool_map(nproc, snr, array, iterable(coor_rest), fwhm, True,
                       array2, use2alone, exclude_negative_lobes)
        res = np.array(res)
        yy_res = res[:, 0]
        xx_res = res[:, 1]
        snr_value = res[:, 4]
        snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value

    if plot:
        plot_frames(snrmap_array, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes".format(nproc))
        timing(start_time)
    return snrmap_array
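A minimal usage sketch for the snrmap above, on a synthetic residual frame with one injected point source. It assumes the VIP helpers the function relies on (pool_map, get_annulus_segments, snr, frame_center, etc.) are importable alongside it; in the VIP package this routine has been exposed through its metrics module, though the exact import path depends on the version:

import numpy as np

rng = np.random.default_rng(3)
frame = rng.normal(0, 1, (101, 101))  # Gaussian residual noise
frame[70, 55] += 15.0                 # injected fake companion

snr_frame = snrmap(frame, fwhm=4.0, approximated=False, plot=False,
                   nproc=1, verbose=False)
print(snr_frame[70, 55])  # should stand well above the surrounding values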
Example #23
def linechart(df, plot_label, line=True, full=False):
    """
    Function to build a linechart and export a PNG and an SVG of the image.
    
    Parameters
    ----------
    df : pandas dataframe
        dataframe to plot
        
    plot_label : string
        plot title
        
    line : boolean
        True for lineplot, False for scatterplot
        
    full : boolean
        True for ylim=[0, 1], False for ylim=[0, 3×max(mad)]
        
    Returns
    -------
    plotted : boolean
        True if data plotted, False otherwise
    
    Outputs
    -------
    inline plot
    """
    try:
        start = min(df.index.values)
    except:
        print("End of data.")
        return False
    stop = max(df.index.values)
    print("Plotting...")
    print(plot_label)
    fig = plt.figure(figsize=(10, 8), dpi=75)
    plt.rcParams['agg.path.chunksize'] = 10000
    ax = fig.add_subplot(111)
    ax.set_ylabel('unit cube normalized vector length')
    mad_values = []
    ci = [0, 0]
    cls = list(color_palette.keys())
    for i, device in enumerate(list(df.columns)):
        if device.startswith('normalized'):
            d2 = device[25:]
        else:
            d2 = device
        plot_line = df[[device]].dropna()
        mp = mad(plot_line)
        if mp > 0:
            print(mp)
            mad_values.append(mp)
        else:
            mp = plot_line.std()[0]
            if mp > 0:
                print(mp)
                mad_values.append(mp)
            else:
                print(max(plot_line[[device]]))
                mad_values.append(max(plot_line[[device]]))
        label = d2
        for c in color_key:
            if c in d2 or d2 in c:
                cmap = color_key[c]
            if not cmap:
                cmap = color_palette[cls[ci[0]]][ci[1]]
                if ci[1] < len(color_palette[cls[ci[0]]]) - 1:
                    ci[1] = ci[1] + 1
                else:
                    ci[0] = ci[0] + 1 if ci[0] < (len(color_palette.keys()) -
                                                  1) else 0
                    ci[1] = 0
        if line:
            ax.plot_date(x=plot_line.index,
                         y=plot_line,
                         alpha=0.4,
                         label=label,
                         marker="",
                         linestyle="solid",
                         color=cmap)
        else:
            ax.plot_date(x=plot_line.index,
                         y=plot_line,
                         alpha=0.4,
                         label=label,
                         marker="o",
                         linestyle="None",
                         color=cmap)
        ax.legend(loc='best', fancybox=True, framealpha=0.5)
    try:
        ylim = max(mad_values)
    except:
        ylim = 0
    if full or ylim == 0:
        ax.set_ylim([0, 1])
    else:
        try:
            ax.set_ylim([0, 3 * ylim])
        except:
            ax.set_ylim([0, 1])
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    plt.suptitle(plot_label)
    plt.xticks(rotation=65)
    plt.show()
    return True
Example #24
def snrmap(array, fwhm, approximated=False, plot=False, known_sources=None,
           nproc=None, array2=None, use2alone=False, verbose=True, **kwargs):
    """Parallel implementation of the S/N map generation function. Applies the
    S/N function (small samples penalty) at each pixel.

    Parameters
    ----------
    array : numpy ndarray
        Input frame (2d array).
    fwhm : float
        Size in pixels of the FWHM.
    approximated : bool, optional
        If True, an approximated S/N map is generated.
    plot : bool, optional
        If True plots the S/N map. False by default.
    known_sources : None, tuple or tuple of tuples, optional
        To take into account existing sources. It should be a tuple of float/int
        or a tuple of tuples (of float/int) with the coordinate(s) of the known
        sources.
    nproc : int or None
        Number of processes for parallel computing.
    array2 : numpy ndarray, optional
        Additional image (e.g. processed image with negative derotation angles)
        that provides more noise samples. Should have the same dimensions as
        array.
    use2alone: bool, optional
        Whether to use array2 alone to estimate the noise (might be useful to 
        estimate the snr of extended disk features).
    verbose: bool, optional
        Whether to print timing or not.
    **kwargs : dictionary, optional
        Arguments to be passed to ``plot_frames`` to customize the plot (and to
        save it to disk).

    Returns
    -------
    snrmap : 2d numpy ndarray
        Frame with the same size as the input frame, with the S/N value
        computed at each pixel.
    """
    if verbose:
        start_time = time_ini()

    check_array(array, dim=2, msg='array')
    sizey, sizex = array.shape
    snrmap_array = np.zeros_like(array)
    width = min(sizey, sizex) / 2 - 1.5 * fwhm
    mask = get_annulus_segments(array, (fwhm / 2) + 2, width, mode="mask")[0]
    mask = np.ma.make_mask(mask)
    # by making a bool mask *after* applying the mask to the array, we also mask
    # out zero values from the array. This logic cannot be simplified by using
    # mode="ind"!
    yy, xx = np.where(mask)
    coords = zip(xx, yy)

    if nproc is None:
        nproc = cpu_count() // 2        # Hyper-threading doubles the # of cores

    if known_sources is None:

        # proxy to S/N calculation
        if approximated:
            cy, cx = frame_center(array)
            tophat_kernel = Tophat2DKernel(fwhm / 2)
            array = convolve(array, tophat_kernel)
            width = min(sizey, sizex) / 2 - 1.5 * fwhm
            mask = get_annulus_segments(array, (fwhm / 2) + 1, width - 1,
                                        mode="mask")[0]
            mask = np.ma.make_mask(mask)
            yy, xx = np.where(mask)
            coords = [(int(x), int(y)) for (x, y) in zip(xx, yy)]
            res = pool_map(nproc, _snr_approx, array, iterable(coords), fwhm,
                           cy, cx)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, 2]
            snrmap_array[yy.astype(int), xx.astype(int)] = snr_value

        # computing s/n map with Mawet+14 definition
        else:
            res = pool_map(nproc, snr, array, iterable(coords), fwhm, True,
                           array2, use2alone)
            res = np.array(res)
            yy = res[:, 0]
            xx = res[:, 1]
            snr_value = res[:, -1]
            snrmap_array[yy.astype('int'), xx.astype('int')] = snr_value

    # masking known sources
    else:
        if not isinstance(known_sources, tuple):
            raise TypeError("`known_sources` must be a tuple or tuple of "
                            "tuples")
        else:
            source_mask = np.zeros_like(array)
            if isinstance(known_sources[0], tuple):
                for coor in known_sources:
                    source_mask[coor[::-1]] = 1
            elif isinstance(known_sources[0], int):
                source_mask[known_sources[1], known_sources[0]] = 1
            else:
                raise TypeError("`known_sources` seems to have wrong type. It "
                                "must be a tuple of ints or tuple of tuples "
                                "(of ints)")

        # checking the mask with the sources
        if source_mask[source_mask == 1].shape[0] > 50:
            msg = 'Input source mask is too crowded (check its validity)'
            raise RuntimeError(msg)

        soury, sourx = np.where(source_mask == 1)
        sources = []
        coor_ann = []
        arr_masked_sources = array.copy()
        centery, centerx = frame_center(array)
        for y, x in zip(soury, sourx):
            radd = dist(centery, centerx, int(y), int(x))
            if int(radd) < centery - np.ceil(fwhm):
                sources.append((y, x))

        for source in sources:
            y, x = source
            radd = dist(centery, centerx, int(y), int(x))
            anny, annx = get_annulus_segments(array, int(radd-fwhm),
                                              int(np.round(3 * fwhm)))[0]

            ciry, cirx = draw.circle(y, x, int(np.ceil(fwhm)))
            # masking the sources positions (using the MAD of pixels in annulus)
            arr_masked_sources[ciry, cirx] = mad(array[anny, annx])

            # S/Ns of annulus without the sources
            circle_coords = set(zip(cirx, ciry))
            coor_ann_src = [(x, y) for (x, y) in zip(annx, anny)
                            if (x, y) not in circle_coords]
            res = pool_map(nproc, snr, arr_masked_sources,
                           iterable(coor_ann_src), fwhm, True, array2,
                           use2alone)
            res = np.array(res)
            yy_res = res[:, 0]
            xx_res = res[:, 1]
            snr_value = res[:, 4]
            snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value
            # accumulate the annulus coordinates of each source so they are
            # excluded from the rest-of-frame S/N computation below
            coor_ann += coor_ann_src

        # S/Ns of the rest of the frame without the annulus
        coor_rest = [(x, y) for (x, y) in zip(xx, yy) if (x, y) not in coor_ann]
        res = pool_map(nproc, snr, array, iterable(coor_rest), fwhm, True,
                       array2, use2alone)
        res = np.array(res)
        yy_res = res[:, 0]
        xx_res = res[:, 1]
        snr_value = res[:, 4]
        snrmap_array[yy_res.astype('int'), xx_res.astype('int')] = snr_value

    if plot:
        plot_frames(snrmap_array, colorbar=True, title='S/N map', **kwargs)

    if verbose:
        print("S/N map created using {} processes".format(nproc))
        timing(start_time)
    return snrmap_array
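The `known_sources` branch above accepts either a single (x, y) tuple of ints or a tuple of such tuples, and flips each pair to (y, x) when indexing the mask. A minimal, self-contained sketch of that convention on toy numbers, using only numpy:

import numpy as np

frame = np.zeros((11, 11))
known_sources = ((3, 4), (7, 2))      # two companions given as (x, y)

source_mask = np.zeros_like(frame)
for coor in known_sources:
    source_mask[coor[::-1]] = 1       # index the array as (y, x)

print(np.argwhere(source_mask == 1))  # -> [[2 7], [4 3]], i.e. rows are (y, x)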
Exemple #25
0
    def make_master_map(self, ls, fname, path=''):
        '''
        Create a master nonlinearity map from a list of individual
        nonlinearity maps.

        Parameters
        ----------
        ls : list of str
            Paths to the individual *.nl.fits nonlinearity maps to combine.
        fname : str
            Name of the output FITS file.
        path : str, optional
            Output directory; defaults to the current working directory.

        Returns
        -------
        None.

        '''
        lsuid = [basename(f).replace('.nl.fits', '') for f in ls]
        if path == '':
            path = getcwd()
        from astropy.stats import median_absolute_deviation as mad
        from datetime import datetime
        nb_ext = fits.getdata(ls[0]).shape[0]

        nl = np.zeros((3, 4096, 4096))

        res = np.asarray([fits.getdata(f)[2, :, :] for f in ls])
        chi2 = np.asarray([fits.getdata(f)[3, :, :] for f in ls])
        md = np.median(res, axis=0)
        msd = mad(res, axis=0)
        top = md + 5 * msd
        mask_outliers = np.asarray(
            [np.where(im > top, True, False) for im in res], dtype=bool)
        mask_chi2 = np.asarray(
            [np.where(im >= 1.1, True, False) for im in chi2], dtype=bool)
        bc2 = np.all(mask_chi2, axis=0)
        nl[2] = np.asarray(bc2, dtype=int)
        mask3d = mask_outliers | mask_chi2
        for i in range(2):
            ext = np.asarray([fits.getdata(f)[i] for f in ls])
            a = np.ma.array(ext, mask=mask3d)
            mean_ext = a.mean(axis=0)
            nl[i, :, :] = mean_ext.data

        def make_file(nl, name, ls=[], path=''):
            hdu = fits.PrimaryHDU()
            #import pdb
            #pdb.set_trace()
            hdu.data = nl
            time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
            uid = datetime.utcnow().strftime("%y%m%d%H%M%S")
            hdu.header['DATE'] = (time, "Creation time")
            hdu.header['UNIQUEID'] = (uid, "Unique identification number.")
            hdu.header.add_comment("Built using:")
            for u in lsuid:
                hdu.header.add_comment(u)
            if len(ls) > 0:
                for f in ls:
                    hdu.header.add_comment(f)
            if path == '':
                path = getcwd()

            hdu.writeto(join(path, name), overwrite=True)

        make_file(nl, fname, path=path)
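The masked mean used above (mask3d flags, per file, pixels that exceed the median residual by 5 MAD or have chi2 >= 1.1) can be exercised on toy data; a minimal sketch using only numpy:

import numpy as np

stack = np.array([[1.0, 2.0],
                  [1.2, 50.0],
                  [0.9, 2.1]])        # 3 "files", 2 pixels each
mask = np.zeros_like(stack, dtype=bool)
mask[1, 1] = True                     # flag the 50.0 as an outlier

mean_ext = np.ma.array(stack, mask=mask).mean(axis=0)
print(mean_ext.filled(np.nan))        # -> [1.0333... 2.05], outlier ignored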
Exemple #26
0
    def aspcap_residue_plot(self,
                            test_predictions,
                            test_labels,
                            test_pred_error=None,
                            test_labels_err=None):
        """
        NAME:
            aspcap_residue_plot
        PURPOSE:
            plot aspcap residue
        INPUT:
            test_predictions (ndarray): Test result from neural network
            test_labels (ndarray): Ground truth for test result
            test_pred_error (ndarray): (Optional) 1-sigma error for test result from Bayesian neural network.
            test_labels_err (ndarray): (Optional) Error of the ground truth for test result
        OUTPUT:
            None, just plots to be saved
        HISTORY:
            2018-Jan-28 - Written - Henry Leung (University of Toronto)
        """

        import pylab as plt
        import numpy as np
        import seaborn as sns

        print("Start plotting residues")

        resid = test_predictions - test_labels

        # Some plotting variables for aesthetics
        plt.rcParams['axes.facecolor'] = 'white'
        sns.set_style("ticks")
        plt.rcParams['axes.grid'] = True
        plt.rcParams['grid.color'] = 'gray'
        plt.rcParams['grid.alpha'] = '0.4'

        x_lab = 'ASPCAP'
        y_lab = 'astroNN'
        fullname = self.targetname

        aspcap_residue_path = os.path.join(self.fullfilepath, 'ASPCAP_residue')

        if not os.path.exists(aspcap_residue_path):
            os.makedirs(aspcap_residue_path)

        mad_labels = np.zeros(test_labels.shape[1])

        for i in range(test_labels.shape[1]):
            not9999_index = np.where(test_labels[:, i] != MAGIC_NUMBER)
            mad_labels[i] = mad((test_labels[:, i])[not9999_index], axis=0)

        if test_pred_error is None:
            # To deal with prediction from non-Bayesian Neural Network
            test_pred_error = np.zeros(test_predictions.shape)

        for i in range(self._labels_shape):
            plt.figure(figsize=(15, 11), dpi=200)
            plt.axhline(0, ls='--', c='k', lw=2)
            not9999 = np.where(test_labels[:, i] != -9999.)[0]
            plt.errorbar((test_labels[:, i])[not9999], (resid[:, i])[not9999],
                         yerr=(test_pred_error[:, i])[not9999],
                         markersize=2,
                         fmt='o',
                         ecolor='g',
                         capthick=2,
                         elinewidth=0.5)

            plt.xlabel('ASPCAP ' + target_name_conversion(fullname[i]),
                       fontsize=25)
            plt.ylabel(r'$\Delta$ ' + target_name_conversion(fullname[i]) +
                       '\n(' + y_lab + ' - ' + x_lab + ')',
                       fontsize=25)
            plt.tick_params(labelsize=20, width=1, length=10)
            if self._labels_shape == 1:
                plt.xlim([
                    np.min((test_labels[:])[not9999]),
                    np.max((test_labels[:])[not9999])
                ])
            else:
                plt.xlim([
                    np.min((test_labels[:, i])[not9999]),
                    np.max((test_labels[:, i])[not9999])
                ])
            ranges = (np.max((test_labels[:, i])[not9999]) - np.min(
                (test_labels[:, i])[not9999])) / 2
            plt.ylim([-ranges, ranges])
            bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=2)
            bias = np.median((resid[:, i])[not9999], axis=0)
            scatter = mad((resid[:, i])[not9999], axis=0)
            plt.figtext(0.6,
                        0.75,
                        r'$\widetilde{m}$=' + '{0:.3f}'.format(bias) +
                        r' $\widetilde{s}$=' +
                        '{0:.3f}'.format(scatter / float(mad_labels[i])) +
                        ' s=' + '{0:.3f}'.format(scatter),
                        size=25,
                        bbox=bbox_props)
            plt.tight_layout()
            plt.savefig(aspcap_residue_path + f'/{fullname[i]}_test.png')
            plt.close('all')
            plt.clf()

        if test_labels_err is not None:
            for i in range(self._labels_shape):
                plt.figure(figsize=(15, 11), dpi=200)
                plt.axhline(0, ls='--', c='k', lw=2)
                not9999 = np.where(test_labels[:, i] != -9999.)[0]

                plt.scatter((test_labels_err[:, i])[not9999],
                            (resid[:, i])[not9999],
                            s=0.7)
                plt.xlabel(r'ASPCAP Error of ' +
                           target_name_conversion(fullname[i]),
                           fontsize=25)
                plt.ylabel(r'$\Delta$ ' + target_name_conversion(fullname[i]) +
                           '\n(' + y_lab + ' - ' + x_lab + ')',
                           fontsize=25)
                plt.tick_params(labelsize=20, width=1, length=10)
                if self._labels_shape == 1:
                    plt.xlim([
                        np.percentile((test_labels_err[:])[not9999], 5),
                        np.percentile((test_labels_err[:])[not9999], 95)
                    ])
                else:
                    plt.xlim([
                        np.min((test_labels_err[:, i])[not9999]),
                        np.percentile((test_labels_err[:, i])[not9999], 90)
                    ])
                ranges = (np.percentile(
                    (resid[:, i])[not9999], 95) - np.percentile(
                        (resid[:, i])[not9999], 5))
                plt.ylim([-ranges, ranges])

                plt.tight_layout()
                plt.savefig(aspcap_residue_path +
                            f'/{fullname[i]}_test_err.png')
                plt.close('all')
                plt.clf()

        print("Finished plotting residues")
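The annotation above reports the bias as the median of the residuals and the scatter as their MAD, also normalised by the MAD of the labels; a tiny worked example on made-up numbers:

import numpy as np
from astropy.stats import median_absolute_deviation as mad

labels = np.array([4000., 4500., 5000., 5500., 6000.])
preds = np.array([3990., 4530., 4985., 5515., 6020.])
resid = preds - labels                 # [-10., 30., -15., 15., 20.]

bias = np.median(resid)                # 15.0
scatter = mad(resid)                   # 15.0
norm_scatter = scatter / mad(labels)   # 15.0 / 500.0 = 0.03
print(bias, scatter, norm_scatter)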
Exemple #27
0
    print(list)
    print('')

##################
paths = [
    '/mnt/hgfs/data/k12h02h/00000339.2012FU62.REDUCED.FIT',
    '/mnt/hgfs/data/k12h02h/stack-2.fits',
    '/mnt/hgfs/data/k12h02h/stack-4.fits',
    '/mnt/hgfs/data/k12h02h/stack-6.fits',
    '/mnt/hgfs/data/k12h02h/stack-9.fits',
    '/mnt/hgfs/data/k12h02h/stack-16.fits',
]
for path in paths:
    image = getImage(path)
    #print np.median(image), mad(image)
    bkg_sigma = 1.48 * mad(image)
    print(mad(image), 5 * bkg_sigma)

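The 1.48 * MAD background estimate printed above can be sanity-checked on synthetic Gaussian noise (assuming `mad` here is astropy's median_absolute_deviation, as in the other snippets); a minimal sketch:

import numpy as np
from astropy.stats import median_absolute_deviation as mad

rng = np.random.default_rng(0)
noise = rng.normal(0.0, 10.0, size=(512, 512))   # sigma = 10 by construction

bkg_sigma = 1.48 * mad(noise)
print(bkg_sigma, noise.std())                    # both close to 10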
##################
path = '/mnt/hgfs/data/k12h02h/00000339.2012FU62.REDUCED.FIT'
index = 0
image = getImage(path, index)
stars = findStars(image)
stars.sort('flux')
stars.reverse()
brightest = stars[:3]

positions = list(zip(brightest['xcentroid'], brightest['ycentroid']))
apertures = CircularAperture(positions, r=6)
annuli = CircularAnnulus(positions, r_in=18, r_out=24)
phot_table = aperture_photometry(image, [apertures, annuli])
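With a list of two apertures, `aperture_photometry` returns one sum column per aperture ('aperture_sum_0' for the circles, 'aperture_sum_1' for the annuli). A hedged sketch of turning those into background-subtracted fluxes, assuming a photutils version where `area` is an aperture attribute:

bkg_mean = phot_table['aperture_sum_1'] / annuli.area
bkg_total = bkg_mean * apertures.area
phot_table['residual_aperture_sum'] = phot_table['aperture_sum_0'] - bkg_total
print(phot_table['residual_aperture_sum'])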
Exemple #28
0
        cube_empire = SpectralCube(data=hdu.data, header=hdu.header, wcs=wcs.WCS(hdu.header))
        empire_mask = np.isfinite(hdu.data)
        cube_empire = cube_empire.with_mask(empire_mask)

        channel_ratio = ((cube_degas.spectral_axis[1]
                          - cube_degas.spectral_axis[0]) / 
                         (cube_empire.spectral_axis[1]
                          - cube_empire.spectral_axis[0])).to(u.dimensionless_unscaled).value
        
        kernel = Box1DKernel(channel_ratio)
        cube_empire = cube_empire.spectral_smooth(kernel)
        cube_empire = cube_empire.spectral_interpolate(cube_degas.spectral_axis)
        cube_empire = cube_empire.reproject(cube_degas.header)
        cube_degas = cube_degas.convolve_to(cube_empire.beam)

        noise_empire = mad(cube_empire.filled_data[:].value, ignore_nan=True)
        noise_degas = mad(cube_degas.filled_data[:].value, ignore_nan=True)

        bools = mask.filled_data[:] > 1
        degas_vals = cube_degas.filled_data[bools].value
        empire_vals = cube_empire.filled_data[bools].value
        idx = np.isfinite(degas_vals) * np.isfinite(empire_vals)

        ax.plot(empire_vals * sf, degas_vals * sf,'ro', alpha=0.5, markeredgecolor='k',
                label=molecule)
        xlims = ax.get_xlim()
        ylims = ax.get_ylim()

        ax.plot([np.min([xlims[0],ylims[0]]), np.max([xlims[1], ylims[1]])],
                [np.min([xlims[0],ylims[0]]), np.max([xlims[1], ylims[1]])])
        ax.set_xlabel('EMPIRE (mK)')
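The MAD-based noise estimates computed above are natural thresholds for restricting the comparison to significant pixels; a sketch reusing the names from this snippet (the 3-sigma cut and the extra plot call are assumptions, not part of the original):

good = idx & (empire_vals > 3 * noise_empire) & (degas_vals > 3 * noise_degas)
ax.plot(empire_vals[good] * sf, degas_vals[good] * sf, 'o',
        color='C0', alpha=0.5, markeredgecolor='k',
        label=molecule + ' (>3 sigma)')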
Exemple #29
0
def snrmap(array, fwhm, plot=False, mode='sss', source_mask=None, nproc=None):
    """Parallel implementation of the SNR map generation function. Applies the 
    SNR function (small samples penalty) at each pixel.
    
    Parameters
    ----------
    array : array_like
        Input frame.
    fwhm : float
        Size in pixels of the FWHM.
    plot : {False, True}, bool optional
        If True plots the SNR map. 
    mode : {'sss', 'peakstddev'}, string optional
        'sss' uses the approach with the small sample statistics penalty and
        'peakstddev' uses the peak(aperture)/std(annulus) version.
    source_mask : array_like, optional
        If provided, existing sources are taken into account. The mask is a
        2d array of ones with the same shape as the input frame, in which
        the centers of the known sources are set to zero.
    nproc : int or None
        Number of processes for parallel computing.
    
    Returns
    -------
    snrmap : array_like
        Frame of the same size as the input frame, with the S/N value
        computed at each pixel.
        
    """
    start_time = timeInit()
    if array.ndim != 2:
        raise TypeError('Input array is not a 2d array or image.')
    if plot:  plt.close('snr')
        
    sizey, sizex = array.shape
    snrmap = np.zeros_like(array)
    width = min(sizey,sizex)/2 - 1.5*fwhm
    mask = get_annulus(array, (fwhm/2)+1, width)
    mask = np.ma.make_mask(mask)
    yy, xx = np.where(mask)
    coords = [(x,y) for (x,y) in zip(xx,yy)]
        
    if not nproc:  
        nproc = int((cpu_count()/2))  # Hyper-threading doubles the # of cores
    
    if mode == 'sss':
        F = snr_ss
    elif mode == 'peakstddev':
        F = snr_peakstddev
    else:
        raise TypeError('\nMode not recognized.')
    
    if source_mask is None:
        pool = Pool(processes=int(nproc))                                        
        res = pool.map(eval_func_tuple, zip(itt.repeat(F), itt.repeat(array),
                                            coords, itt.repeat(fwhm),
                                            itt.repeat(True)))
        res = np.array(res)
        pool.close()
        yy = res[:,0]
        xx = res[:,1]
        snr = res[:,2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    else:
        # checking the mask with the sources
        if not array.shape == source_mask.shape:
            raise RuntimeError('Source mask has wrong size.')
        if source_mask[source_mask == 0].shape[0] == 0:
            msg = 'Input source mask is empty.'
            raise RuntimeError(msg)
        if source_mask[source_mask == 0].shape[0] > 20:
            msg = 'Input source mask is too crowded. Check its validity.'
            raise RuntimeError(msg)
        
        soury, sourx = np.where(source_mask == 0)
        sources = []
        ciry = []; cirx = []; anny = []; annx = []
        array_sources = array.copy()
        centery, centerx = frame_center(array)
        for (y,x) in zip(soury,sourx):
            radd = dist(centery, centerx, y, x)
            if int(np.floor(radd)) < centery - np.ceil(fwhm):
                sources.append((y,x))
        
        for source in sources:
            y, x = source        
            radd = dist(centery, centerx, y, x)
            tempay, tempax = get_annulus(array, int(np.floor(radd-fwhm)), 
                                    int(np.ceil(2*fwhm)), output_indices=True)
            tempcy, tempcx = draw.circle(y, x, int(np.ceil(1*fwhm)))
            # masking the source position (using the MAD of pixels in annulus)
            array_sources[tempcy, tempcx] = mad(array[tempay, tempax])
            ciry += list(tempcy); cirx += list(tempcx)
            anny += list(tempay); annx += list(tempax)

        # coordinates of annulus without the sources
        circle_coords = set(zip(ciry, cirx))
        coor_ann = [(y, x) for (y, x) in zip(anny, annx)
                    if (y, x) not in circle_coords]

        # coordinates of the rest of the frame without the annulus
        coor_rest = [(y,x) for (y,x) in zip(yy, xx) if (y,x) not in coor_ann]
        
        pool1 = Pool(processes=int(nproc))
        res = pool1.map(eval_func_tuple, zip(itt.repeat(F), itt.repeat(array),
                                             coor_rest, itt.repeat(fwhm),
                                             itt.repeat(True)))
        res = np.array(res)
        pool1.close()
        yy = res[:,0]
        xx = res[:,1]
        snr = res[:,2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
        
        pool2 = Pool(processes=int(nproc))
        res = pool2.map(eval_func_tuple, zip(itt.repeat(F),
                                             itt.repeat(array_sources),
                                             coor_ann, itt.repeat(fwhm),
                                             itt.repeat(True)))
        res = np.array(res)
        pool2.close()
        yy = res[:,0]
        xx = res[:,1]
        snr = res[:,2]
        snrmap[yy.astype('int'), xx.astype('int')] = snr
    
    if plot:  pp_subplots(snrmap, colorb=True, title='SNRmap')
        
    print("SNR map created using {} processes.".format(nproc))
    timing(start_time)
    return snrmap
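A usage sketch for the docstring's source_mask convention (ones everywhere, zeros at the known source centres). The module-level helpers this function relies on (snr_ss, get_annulus, eval_func_tuple, ...) are not shown in this excerpt, so this is only illustrative:

import numpy as np

frame = np.random.normal(size=(100, 100))    # synthetic frame
source_mask = np.ones_like(frame)
source_mask[40, 30] = 0                      # known source at (y=40, x=30)
source_mask[55, 62] = 0                      # a second known source

snr_frame = snrmap(frame, fwhm=4.0, mode='sss', source_mask=source_mask,
                   nproc=2)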
Exemple #30
0
	# Reconstruct the full frame from the windows	
	stackedFigure = matplotlib.pyplot.figure(figsize=(10, 10))
	matplotlib.pyplot.title("Initial 10 frame stacked image")
	boostedFullFrame = numpy.zeros((fullFrameysize, fullFramexsize))	
	fullFrame = numpy.zeros((fullFrameysize, fullFramexsize))	
	for w in allWindows:
		boostedImage = ultracamutils.percentiles(w.stackedData, 10, 99.8)
		image = w.stackedData
		xll = w.xll/w.xbin - xmin
		xsize = w.nx
		yll = w.yll/w.ybin - ymin
		ysize = w.ny
		boostedFullFrame[yll:yll+ysize, xll:xll+xsize] = fullFrame[yll:yll+ysize, xll:xll+xsize] + boostedImage
		fullFrame[yll:yll+ysize, xll:xll+xsize] = fullFrame[yll:yll+ysize, xll:xll+xsize] + image
		
		bkg_sigma = 1.48 * mad(image)
		print "bkg_sigma", bkg_sigma   
		sources = daofind(image, fwhm=4.0, threshold=3*bkg_sigma) 
		print "Num sources in this window:", len(sources)  
		w.setSourcesAvoidBorders(sources)	
		
		
	# Get the source list from this image
	# Combine the sources in all of the windows
	allSources = []
	for index, w in enumerate(allWindows):
		xll = w.xll/w.xbin - xmin
		yll = w.yll/w.ybin - ymin
		sources = w.getSources()
		positions = zip(sources['xcentroid'], sources['ycentroid'], sources['flux'])
		new_positions = [(x + xll, y + yll, flux) for (x, y, flux) in positions]
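The window-to-full-frame offset used above (lower-left corner in binned pixels minus the frame origin) can be checked with toy numbers; a standalone sketch, not tied to the ultracamutils window objects:

xll, yll, xbin, ybin = 120, 64, 2, 2     # window lower-left corner and binning
xmin, ymin = 10, 5                       # full-frame origin in binned pixels

x_local, y_local = 12.3, 7.8             # centroid measured inside the window
x_full = x_local + (xll // xbin - xmin)  # -> 62.3
y_full = y_local + (yll // ybin - ymin)  # -> 34.8
print(x_full, y_full)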
Exemple #31
0
def slice_residual():

    NS_r_XX = []
    NS_r_YY = []
    EW_r_XX = []
    EW_r_YY = []

    for tile in tiles:

        for pol in ["XX", "YY"]:

            f_tile = f"{map_dir}/{tile}{pol}_rf1{pol}_tile_maps.npz"

            try:
                map_slices = beam_slices(f_tile, fee_map, nside)

                #  pointings = ["0", "2", "4"]
                pointings = ["0"]

                for i, p in enumerate(pointings):
                    slices = map_slices[i]
                    NS_slices = slices[0]
                    fee_slice = NS_slices[1][0]
                    NS_med = NS_slices[0][0]
                    nulls = np.where(fee_slice < -30)
                    fee_slice[nulls] = np.nan
                    NS_med[nulls] = np.nan
                    NS_resi = np.array(NS_med - fee_slice)
                    if pol == "XX":
                        NS_r_XX.append(NS_resi)
                    else:
                        NS_r_YY.append(NS_resi)

                    EW_slices = slices[1]
                    fee_slice = EW_slices[1][0]
                    EW_med = EW_slices[0][0]
                    nulls = np.where(fee_slice < -30)
                    fee_slice[nulls] = np.nan
                    EW_med[nulls] = np.nan
                    EW_resi = np.array(EW_med - fee_slice)
                    if pol == "XX":
                        EW_r_XX.append(EW_resi)
                    else:
                        EW_r_YY.append(EW_resi)

            except Exception as e:
                print(e)

    NS_XX_res = np.nanmedian(np.array(NS_r_XX), axis=0)
    NS_YY_res = np.nanmedian(np.array(NS_r_YY), axis=0)
    EW_XX_res = np.nanmedian(np.array(EW_r_XX), axis=0)
    EW_YY_res = np.nanmedian(np.array(EW_r_YY), axis=0)

    NS_XX_mad = mad(np.array(NS_r_XX), axis=0)
    NS_YY_mad = mad(np.array(NS_r_YY), axis=0)
    EW_XX_mad = mad(np.array(EW_r_XX), axis=0)
    EW_YY_mad = mad(np.array(EW_r_YY), axis=0)

    # Only used to extract za arrays
    f_tile = f"{map_dir}/S06XX_rf0XX_tile_maps.npz"
    map_slices = beam_slices(f_tile, fee_map, nside)

    za = map_slices[0][0][0][2]

    NS_XX_fit = poly_fit(za, NS_XX_res, NS_XX_res, 2)
    NS_YY_fit = poly_fit(za, NS_YY_res, NS_YY_res, 2)
    EW_XX_fit = poly_fit(za, EW_XX_res, EW_XX_res, 2)
    EW_YY_fit = poly_fit(za, EW_YY_res, EW_YY_res, 2)

    # Plotting stuff
    #  plt.style.use("seaborn")

    #  nice_fonts = {
    #  "font.family": "sans-serif",
    #  "axes.labelsize": 8,
    #  "axes.titlesize": 9,
    #  "font.size": 8,
    #  "legend.fontsize": 6,
    #  "xtick.labelsize": 8,
    #  "ytick.labelsize": 8,
    #  }

    #  plt.rcParams.update(nice_fonts)

    #  plt.figure(figsize=(3.6, 2.4))

    #  colors = _spec(np.linspace(0.17, 0.9, 4))
    colors = _spec([0.14, 0.77, 0.66, 0.35])

    #  plt.figure(figsize=(6, 5))
    #  plt.errorbar(
    #  za,
    #  NS_XX_res,
    #  yerr=NS_XX_mad,
    #  fmt=".",
    #  ms=7,
    #  alpha=0.88,
    #  color=colors[0],
    #  elinewidth=1.4,
    #  capsize=1.4,
    #  capthick=1.6,
    #  )
    #  plt.errorbar(
    #  za,
    #  NS_YY_res,
    #  yerr=NS_YY_mad,
    #  fmt=".",
    #  ms=7,
    #  alpha=0.88,
    #  color=colors[1],
    #  elinewidth=1.4,
    #  capsize=1.4,
    #  capthick=1.6,
    #  )
    #  plt.errorbar(
    #  za,
    #  EW_XX_res,
    #  yerr=EW_XX_mad,
    #  fmt=".",
    #  ms=7,
    #  alpha=0.88,
    #  color=colors[2],
    #  elinewidth=1.4,
    #  capsize=1.4,
    #  capthick=1.6,
    #  )
    #  plt.errorbar(
    #  za,
    #  EW_YY_res,
    #  yerr=EW_YY_mad,
    #  fmt=".",
    #  ms=7,
    #  alpha=0.88,
    #  color=colors[3],
    #  elinewidth=1.4,
    #  capsize=1.4,
    #  capthick=1.6,
    #  )

    plt.scatter(
        za,
        NS_XX_res,
        s=16,
        alpha=0.88,
        edgecolor="black",
        linewidth=0.2,
        color=colors[0],
    )
    plt.scatter(
        za,
        NS_YY_res,
        s=16,
        alpha=0.88,
        edgecolor="black",
        linewidth=0.2,
        color=colors[1],
    )
    plt.scatter(
        za,
        EW_XX_res,
        s=16,
        alpha=0.88,
        edgecolor="black",
        linewidth=0.2,
        color=colors[2],
    )
    plt.scatter(
        za,
        EW_YY_res,
        s=16,
        alpha=0.88,
        edgecolor="black",
        linewidth=0.2,
        color=colors[3],
    )

    plt.plot(za, NS_XX_fit, label="NS_XX", linewidth=2, color=colors[0])
    plt.plot(za, NS_YY_fit, label="NS_YY", linewidth=2, color=colors[1])
    plt.plot(za, EW_XX_fit, label="EW_XX", linewidth=2, color=colors[2])
    plt.plot(za, EW_YY_fit, label="EW_YY", linewidth=2, color=colors[3])

    leg = plt.legend(loc="upper right",
                     frameon=True,
                     markerscale=4,
                     handlelength=1)
    leg.get_frame().set_facecolor("white")
    for le in leg.legendHandles:
        le.set_alpha(1)

    plt.ylim([-4, 4])
    #  plt.xlabel("Zenith Angle [degrees]")
    plt.ylabel("Residual Power [dB]")
    #  plt.tight_layout()
    #  plt.savefig("ref_rot/rot_resi.pdf")
    return plt
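poly_fit is not defined in this snippet; a minimal stand-in consistent with the calls above (x grid, values, data used for masking, polynomial order), written with numpy only and named differently to make clear it is an assumption:

import numpy as np

def poly_fit_sketch(x, y, data, order):
    # Fit a polynomial of the given order to the finite samples and evaluate
    # it on the full x grid; `data` is only used to mask NaNs, mirroring the
    # (za, res, res, 2) call pattern above.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    good = np.isfinite(x) & np.isfinite(y) & np.isfinite(np.asarray(data, dtype=float))
    coefs = np.polyfit(x[good], y[good], order)
    return np.polyval(coefs, x)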