Example #1
    def auto_correlation_seeing(im, cutout_w=15):

        plate_scale = 0.579  #arcsec/pix
        sigma_to_fwhm = 2.355

        #Mask the known bad row (513) with NaNs; left in, it would dominate the autocorrelation of Mimir images.
        im[513] = np.nan

        #Interpolate nans in the image, repeating until no nans remain.
        while np.any(np.isnan(im)):
            im = interpolate_replace_nans(im, kernel=Gaussian2DKernel(0.5))

        #Cut 80 pixels near top/bottom edges, which can dominate the fft if they have a "ski jump" feature.
        im = im[80:944, :]
        y_dim, x_dim = im.shape

        #Subtract off a simple estimate of the image background.
        im -= sigma_clipped_stats(im)[1]

        #Do auto correlation
        fft = signal.fftconvolve(im, im[::-1, ::-1], mode='same')

        #Do a cutout around the center of the fft.
        cutout = fft[int(y_dim / 2) - cutout_w:int(y_dim / 2) + cutout_w,
                     int(x_dim / 2) - cutout_w:int(x_dim / 2) + cutout_w]

        #Set the midplane of the cutout to nans and interpolate.
        cutout[cutout_w] = np.nan

        while np.any(np.isnan(cutout)):
            cutout = interpolate_replace_nans(cutout, Gaussian2DKernel(0.25))

        #Subtract off "background"
        cutout -= np.nanmedian(cutout)

        #Fit a 2D Gaussian to the cutout
        #Assume a seeing of 2".7, the average value measured for PINES.
        g_init = models.Gaussian2D(
            amplitude=np.nanmax(cutout),
            x_mean=cutout_w,
            y_mean=cutout_w,
            x_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2),
            y_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2))
        g_init.x_mean.fixed = True
        g_init.y_mean.fixed = True
        #Set limits on the fitted gaussians between 1".6 and 7".0
        #Factor of sqrt(2) corrects for autocorrelation of 2 gaussians.
        g_init.x_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.x_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)

        fit_g = fitting.LevMarLSQFitter()
        y, x = np.mgrid[:int(2 * cutout_w), :int(2 * cutout_w)]
        g = fit_g(g_init, x, y, cutout)

        #Convert to fwhm in arcsec.
        seeing_fwhm_as = g.y_stddev.value / np.sqrt(
            2) * sigma_to_fwhm * plate_scale

        return seeing_fwhm_as
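A minimal driver for the function above. The imports mirror what the snippet's names (np, interpolate_replace_nans, signal, models, fitting) imply, and the input filename is a placeholder, so treat this as a sketch rather than the original author's usage:

import numpy as np
from astropy.io import fits
from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans
from astropy.stats import sigma_clipped_stats
from astropy.modeling import models, fitting
from scipy import signal

# Hypothetical input: a 1024x1024 Mimir frame on disk.
im = fits.getdata('mimir_frame.fits').astype(float)
print('Seeing FWHM: %.2f arcsec' % auto_correlation_seeing(im))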
Example #2
def make_no_overlap_Nvalue_plots(density_arr, slope_arr, diam_bin_min,
                                 diam_bin_max, color, plottype):

    # make figure
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Slope}$', fontsize=16)
    ax.set_ylabel(r'$\mathrm{log(Density)}$', fontsize=16)

    x = slope_arr
    y = density_arr

    ax.scatter(x, np.log10(y), s=5, c=color, alpha=0.3, edgecolors='none')
    ax.set_ylim(-8, 0.5)
    ax.set_xlim(0, 35)

    # draw contours
    # make sure the arrays dont have NaNs
    slope_fin_idx = np.where(np.isfinite(x))[0]
    density_fin_idx = np.where(np.isfinite(y))[0]
    fin_idx = np.intersect1d(slope_fin_idx, density_fin_idx)

    xp = x[fin_idx]
    yp = y[fin_idx]

    counts, xbins, ybins = np.histogram2d(xp,
                                          np.log10(yp),
                                          bins=25,
                                          density=False)
    # smooth counts to get smoother contours
    kernel = Gaussian2DKernel(x_stddev=1.4)
    counts = convolve(counts, kernel, boundary='extend')

    print "Min and max point number density values in bins", str(
        "{:.3}".format(np.min(counts))), str("{:.3}".format(np.max(counts)))
    diam_bin = str(diam_bin_min) + 'to' + str(diam_bin_max)
    levels_to_plot, cb_lw, vmin = get_levels_to_plot(diam_bin,
                                                     plottype=plottype)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=max(levels_to_plot))

    c = ax.contour(counts.transpose(), levels=levels_to_plot, \
        extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()], \
        cmap=cm.viridis, linestyles='solid', linewidths=2, \
        zorder=10, norm=norm)

    # plot colorbar inside figure
    cbaxes = inset_axes(ax,
                        width='3%',
                        height='52%',
                        loc=7,
                        bbox_to_anchor=[-0.05, -0.2, 1, 1],
                        bbox_transform=ax.transAxes)
    cb = plt.colorbar(c,
                      cax=cbaxes,
                      ticks=[min(levels_to_plot),
                             max(levels_to_plot)],
                      orientation='vertical')
    cb.ax.get_children()[0].set_linewidths(cb_lw)

    # add ticks and grid
    ax.minorticks_on()
    ax.tick_params('both', width=1, length=3, which='minor')
    ax.tick_params('both', width=1, length=4.7, which='major')
    ax.grid(True)

    # save the figure
    if plottype == 'Nvalue':
        # add text on figure to indicate diameter bin
        diambinbox = TextArea(r"$\mathrm{N \geq\ }$" + str(diam_bin_min) +
                              r'$\mathrm{\, km}$',
                              textprops=dict(color='k', size=14))
        anc_diambinbox = AnchoredOffsetbox(loc=2, child=diambinbox, pad=0.0, frameon=False,\
                                             bbox_to_anchor=(0.6, 0.1),\
                                             bbox_transform=ax.transAxes, borderpad=0.0)
        ax.add_artist(anc_diambinbox)
        fig.savefig(slope_extdir + 'slope_v_density_withcontour_Nvalue' +
                    str(diam_bin_min) + 'km.png',
                    dpi=300,
                    bbox_inches='tight')

    elif plottype == 'nooverlap':
        # add text on figure to indicate diameter bin
        diambinbox = TextArea(str(diam_bin_min) + r'$\mathrm{\ to\ }$' + str(diam_bin_max) + r'$\mathrm{\,km}$', \
            textprops=dict(color='k', size=14))
        anc_diambinbox = AnchoredOffsetbox(loc=2, child=diambinbox, pad=0.0, frameon=False,\
                                             bbox_to_anchor=(0.6, 0.1),\
                                             bbox_transform=ax.transAxes, borderpad=0.0)
        ax.add_artist(anc_diambinbox)

        fig.savefig(slope_extdir + 'slope_v_density_withcontour_nooverlap' +
                    diam_bin + 'km.png',
                    dpi=300,
                    bbox_inches='tight')

    plt.cla()
    plt.clf()
    plt.close()

    return None
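The smoothed 2D-histogram contour pattern in this example works standalone; here is a minimal sketch with synthetic data (every input below is illustrative, not from the original module):

import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel, convolve

# Fake slope/log-density clouds standing in for the real catalogs.
slope = np.random.normal(15, 5, 5000)
log_density = np.random.normal(-3, 1, 5000)

counts, xbins, ybins = np.histogram2d(slope, log_density, bins=25)
counts = convolve(counts, Gaussian2DKernel(x_stddev=1.4), boundary='extend')

fig, ax = plt.subplots()
ax.scatter(slope, log_density, s=5, alpha=0.3, edgecolors='none')
ax.contour(counts.T, extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()],
           cmap='viridis', zorder=10)
plt.show()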
Example #3
def make_source_catalog(model,
                        kernel_fwhm,
                        kernel_xsize,
                        kernel_ysize,
                        snr_threshold,
                        npixels,
                        deblend_nlevels=32,
                        deblend_contrast=0.001,
                        deblend_mode='exponential',
                        connectivity=8,
                        deblend=False):
    """
    Create a final catalog of source photometry and morphologies.

    Parameters
    ----------
    model : `ImageModel`
        The input `ImageModel` of a single drizzled image.  The
        input image is assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    kernel_xsize : odd int
        The size in the x dimension (columns) of the kernel array.

    kernel_ysize : odd int
        The size in the y dimension (rows) of the kernel array.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    npixels : int
        The number of connected pixels, each greater than the threshold,
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    deblend_nlevels : int, optional
        The number of multi-thresholding levels to use for deblending
        sources.  Each source will be re-thresholded at
        ``deblend_nlevels``, spaced exponentially or linearly (see the
        ``deblend_mode`` keyword), between its minimum and maximum
        values within the source segment.

    deblend_contrast : float, optional
        The fraction of the total (blended) source flux that a local
        peak must have to be considered as a separate object.
        ``deblend_contrast`` must be between 0 and 1, inclusive.  If
        ``deblend_contrast = 0`` then every local peak will be made a
        separate object (maximum deblending).  If ``deblend_contrast =
        1`` then no deblending will occur.  The default is 0.001, which
        will deblend sources with a magnitude difference of about 7.5.

    deblend_mode : {'exponential', 'linear'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``deblend_nlevels`` keyword)
        when deblending sources.

    connectivity : {4, 8}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source.  The options are 4 or 8
        (default).  4-connected pixels touch along their edges.
        8-connected pixels touch along their edges or corners.  For
        reference, SExtractor uses 8-connected pixels.

    deblend : bool, optional
        Whether to deblend overlapping sources.  Source deblending
        requires scikit-image.

    Returns
    -------
    catalog : `~astropy.table.Table` or `None`
        An astropy Table containing the source photometry and
        morphologies.  If no sources are detected then `None` is
        returned.
    """

    if not isinstance(model, ImageModel):
        raise ValueError('The input model must be an ImageModel.')

    # Use this when model.wht contains an IVM map
    # Calculate "background-only" error assuming the weight image is an
    # inverse-variance map (IVM).  The weight image is clipped because it
    # may contain zeros.
    # bkg_error = np.sqrt(1.0 / np.clip(model.wht, 1.0e-20, 1.0e20))
    # threshold = snr_threshold * bkg_error

    # Estimate the 1-sigma noise in the image empirically because model.wht
    # does not yet contain an IVM map
    mask = (model.wht == 0)
    data_mean, data_median, data_std = sigma_clipped_stats(model.data,
                                                           mask=mask,
                                                           sigma=3.0,
                                                           maxiters=10)
    threshold = data_median + (data_std * snr_threshold)

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=kernel_xsize, y_size=kernel_ysize)
    kernel.normalize()

    segm = photutils.detect_sources(model.data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel,
                                    connectivity=connectivity)

    # segm=None for photutils >= 0.7; segm.nlabels == 0 for photutils < 0.7
    if segm is None or segm.nlabels == 0:
        return None

    # source deblending requires scikit-image
    if deblend:
        segm = photutils.deblend_sources(model.data,
                                         segm,
                                         npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=deblend_nlevels,
                                         contrast=deblend_contrast,
                                         mode=deblend_mode,
                                         connectivity=connectivity,
                                         relabel=True)

    # Calculate total error, including source Poisson noise.
    # This calculation assumes that the data and bkg_error images are in
    # units of electron/s.  Poisson noise is not included for pixels
    # where data < 0.
    exptime = model.meta.resample.product_exposure_time  # total exptime
    # total_error = np.sqrt(bkg_error**2 +
    #                       np.maximum(model.data / exptime, 0))
    total_error = np.sqrt(data_std**2 + np.maximum(model.data / exptime, 0))

    wcs = model.get_fits_wcs()
    source_props = photutils.source_properties(model.data,
                                               segm,
                                               error=total_error,
                                               filter_kernel=kernel,
                                               wcs=wcs)

    columns = [
        'id', 'xcentroid', 'ycentroid', 'sky_centroid', 'area', 'source_sum',
        'source_sum_err', 'semimajor_axis_sigma', 'semiminor_axis_sigma',
        'orientation', 'sky_bbox_ll', 'sky_bbox_ul', 'sky_bbox_lr',
        'sky_bbox_ur'
    ]
    catalog = source_props.to_table(columns=columns)

    # convert orientation to degrees
    orient_deg = catalog['orientation'].to(u.deg)
    catalog.replace_column('orientation', orient_deg)

    # define orientation position angle
    rot = _get_rotation(wcs)
    catalog['orientation_sky'] = ((270. - rot + catalog['orientation'].value) *
                                  u.deg)

    # define flux in microJanskys
    nsources = len(catalog)
    pixelarea = model.meta.photometry.pixelarea_arcsecsq
    if pixelarea is None:
        micro_Jy = np.full(nsources, np.nan)
    else:
        micro_Jy = catalog['source_sum'] * MJSR_TO_UJA2 * pixelarea

    # define AB mag
    abmag = np.full(nsources, np.nan)
    mask = np.isfinite(micro_Jy)
    abmag[mask] = -2.5 * np.log10(micro_Jy[mask]) + 23.9
    catalog['abmag'] = abmag

    # define AB mag error
    # assuming SNR >> 1 (otherwise abmag_error is asymmetric)
    abmag_error = (2.5 * np.log10(np.e) * catalog['source_sum_err'] /
                   catalog['source_sum'])
    abmag_error[~mask] = np.nan
    catalog['abmag_error'] = abmag_error

    return catalog
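A hedged usage sketch; the import path and the filename are assumptions based on the jwst datamodels this function appears to target:

from jwst.datamodels import ImageModel  # assumed import path

model = ImageModel('mosaic_i2d.fits')   # hypothetical background-subtracted image
catalog = make_source_catalog(model, kernel_fwhm=3.0, kernel_xsize=5,
                              kernel_ysize=5, snr_threshold=3.0, npixels=10,
                              deblend=True)
if catalog is not None:
    catalog.write('sources.ecsv', format='ascii.ecsv', overwrite=True)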
Example #4
def _smooth_t(t):
    # Embed a 13x13 Gaussian (sigma = 1.5 pix) as a single-channel conv filter.
    k = np.zeros((1, 1, 13, 13))
    k[:, :, ...] = Gaussian2DKernel(1.5).array
    kk = T.cast(k, theano.config.floatX)
    # Pad by half the kernel on each side so the output keeps the input shape.
    st = conv(t, kk, border_mode=(6, 6))
    return st
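A sketch of how _smooth_t might be compiled and applied. The conv binding is an assumption (any Theano conv2d accepting border_mode fits the call above):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d as conv  # assumed binding for `conv`
from astropy.convolution import Gaussian2DKernel

t = T.tensor4('t')
smooth = theano.function([t], _smooth_t(t))

# border_mode=(6, 6) pads by half the 13x13 kernel, so shape is preserved.
maps = np.random.rand(2, 1, 64, 64).astype(theano.config.floatX)
print(smooth(maps).shape)  # (2, 1, 64, 64)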
Example #5
def merge_fits_files(infolder=None,
                     infiles=None,
                     suffix='',
                     fout=None,
                     method='mean',
                     prefix='',
                     sigma=3,
                     target=None,
                     filt=None,
                     rotangle=None,
                     northup=False,
                     refbox=20,
                     fitmeth='fastgauss',
                     refsign=1,
                     imgoffsetangle=None,
                     verbose=False,
                     tolrot=0.01,
                     pfov=None,
                     replace_nan_with='median',
                     instrument=None):
    """
    Take either a folder full of single (chop/nod) frames or a given list of
    files and simply average them into one merged file
    """

    if infolder is not None and infiles is None:
        infiles = [
            ff for ff in os.listdir(infolder)
            if (ff.endswith(suffix + '.fits')) & (ff.startswith(prefix))
        ]

    if infolder is None:
        infolder = ''

    nfiles = len(infiles)
    ima = []
    shapes = []
    rots = []

    ffull = infolder + "/" + infiles[0]
    hdu = fits.open(ffull)
    head0 = hdu[0].header
    hdu.close()

    if instrument is None:
        instrument = head0['INSTRUME']
        print('Found instrument: ', instrument)

    if instrument == 'VISIR':
        if imgoffsetangle is None:
            imgoffsetangle = _vp.imgoffsetangle
            rotsense = _vp.rotsense
            rotoffset = _vp.rotoffset

    elif (instrument == 'NAOS+CONICA') | (instrument == 'NACO'):
        imgoffsetangle = 0  # apparently correct?
        rotsense = -1  # for NACO the ADA.POSANG really gives the PA on sky
        rotoffset = 0

    elif instrument == "ISAAC":
        imgoffsetangle = _ip.imgoffsetangle
        rotsense = _ip.rotsense
        rotoffset = _ip.rotoffset

    print('Found n potential files: ', nfiles)

    if verbose:
        print("MERGE_FITS_FILES: fitmeth: ", fitmeth)
        print("MERGE_FITS_FILES: refbox: ", refbox)
        print("MERGE_FITS_FILES: refsign: ", refsign)

    for i in range(nfiles):

        ffull = infolder + "/" + infiles[i]
        hdu = fits.open(ffull)

        if verbose:
            print("MERGE_FITS_FILES: i, infiles[i]: ", i, infiles[i])

        head = hdu[0].header
        rot = head['HIERARCH ESO ADA POSANG']

        if "HIERARCH ESO TEL ROT ALTAZTRACK" in head:
            pupiltrack = (head["HIERARCH ESO TEL ROT ALTAZTRACK"])
        else:
            pupiltrack = False

        # --- if pupil tracking is on then the parang in the VISIR fits-header is
        #     not normalised by the offset angle of the VISIR imager with respect
        #     to the adapter/rotator
        if pupiltrack:
            rot = rot - imgoffsetangle

        rot += rotoffset

        if verbose:
            print("MERGE_FITS_FILES: rot: ", rot)

        if target is not None:
            targ = head['HIERARCH ESO OBS TARG NAME']
            if str(targ) != target:
                continue
        if filt is not None:
            filtf = head['HIERARCH ESO INS FILT1 NAME']
            if str(filtf) != filt:
                continue
        if rotangle is not None:
            if rot != rotangle:
                continue

        im = hdu[0].data
        hdu.close()

        # --- rotate images if Northup is requested to be up
        if np.abs(rot) > tolrot and northup:

            # --- ndimage is not compatible with NaNs, so we need to replace them
            print(
                "MERGE_FITS_FILES: Encountered NaNs not compatible with " +
                "rotation. Replacing with ", replace_nan_with)

            if isinstance(replace_nan_with, (float, int)):
                im[np.isnan(im)] = replace_nan_with

            elif replace_nan_with == 'median':
                im[np.isnan(im)] = np.nanmedian(im)

            # --- WARNING: does not work with large NaN areas at the borders
            elif replace_nan_with == 'interpol':
                kernel = Gaussian2DKernel(x_stddev=1)
                im = interpolate_replace_nans(im, kernel)

#            print(np.isnan(im).any())
#
#            plt.imshow(im, origin='bottom', interpolation='nearest',
#                   norm=LogNorm())
#            plt.title(str(i)+' - after replacing NaNs, before rotation')
#            plt.show()

            im = ndimage.rotate(im, rotsense * rot, order=3)

#            plt.imshow(im, origin='bottom', interpolation='nearest',
#                       norm=LogNorm())
#            plt.show()

        ima.append(im)
        shapes.append(np.shape(im))
        rots.append(rot)

    shapes = np.array(shapes)
    n = len(ima)

    if verbose:
        print("MERGE_FITS_FILES: n: ", n)

    # --- after rotating, the images probably have different sizes
    if northup:
        minsize = [np.min(shapes[:, 0]), np.min(shapes[:, 1])]
        for i in range(n):

            if verbose:
                print("MERGE_FITS_FILES: i: ", i)
                print("MERGE_FITS_FILES: shapes: ", shapes[i])
                print("MERGE_FITS_FILES: argmax: ",
                      np.unravel_index(np.nanargmax(ima[i]), shapes[i]))

                plt.imshow(ima[i],
                           origin='bottom',
                           interpolation='nearest',
                           norm=LogNorm())
                plt.title(
                    str(i) + ' - rot ' + str(rots[i]) +
                    ' - before centered crop')
                plt.show()

            fit, _, _ = _find_source(ima[i],
                                     method=fitmeth,
                                     searchbox=refbox,
                                     fitbox=refbox,
                                     sign=refsign,
                                     verbose=verbose,
                                     plot=verbose)

            ima[i] = _crop_image(ima[i], box=minsize, cenpos=fit[2:4])

            if verbose:
                plt.imshow(ima[i],
                           origin='bottom',
                           interpolation='nearest',
                           norm=LogNorm())

                plt.title(str(i) + ' - rot ' + str(rots[i]))
                plt.show()
                print(i, fit)

        # --- if WCS is present in the header, it needs to be updated to
        #     match the cropped, north-up images
        if "CTYPE1" in head0:
            if head0["CTYPE1"] == "RA---TAN":
                if pfov is None:
                    if "HIERARCH ESO INS PFOV" in head0:
                        pfov = float(head0["HIERARCH ESO INS PFOV"])  # VISIR
                    else:
                        pfov = float(head0["HIERARCH ESO INS PIXSCALE"])  # NACO

                ra = head0["HIERARCH ESO TEL TARG ALPHA"]
                sec = ra % 100
                min = (ra % 10000 - sec) / 100
                hour = (int(ra) / 10000)
                ra_deg = 15 * (hour + min / 60.0 + sec / 3600.0)
                # print(ra_deg)

                dec = head0["HIERARCH ESO TEL TARG DELTA"]
                sec = dec % 100
                min = (dec % 10000 - sec) / 100
                deg = (int(dec) / 10000)
                if deg < 0:
                    dec_deg = deg - min / 60.0 - sec / 3600.0
                else:
                    dec_deg = deg + min / 60.0 + sec / 3600.0
                # print(ra_deg)

                head0["CRPIX1"] = minsize[1] * 0.5
                head0["CRPIX2"] = minsize[0] * 0.5
                head0["CRVAL1"] = ra_deg
                head0["CRVAL2"] = dec_deg
                if "CD1_1" in head0:
                    del head0["CD1_1"]
                    del head0["CD1_2"]
                    del head0["CD2_1"]
                    del head0["CD2_2"]
                head0["CDELT1"] = -pfov / 3600.0
                head0["CDELT2"] = pfov / 3600.0

    ima = np.array(ima)
    #print(np.shape(ima))

    if method == 'mean':
        outim = np.nanmean(ima, axis=0)

    if method == 'median':
        outim = np.nanmedian(ima, axis=0)

    if method == 'sigmaclip':
        outim = np.array(
            np.mean(sigma_clip(ima, sigma=sigma, axis=0, maxiters=2), axis=0))

    print(" - " + str(n) + " images combined.")

    if fout is not None:
        fits.writeto(fout, outim, head0, overwrite=True)

    return outim
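A hypothetical call, assuming a folder of VISIR chop/nod frames; every path and suffix here is a placeholder:

merged = merge_fits_files(infolder='raw_frames',   # hypothetical folder
                          suffix='_nod',           # hypothetical suffix
                          method='median', northup=True,
                          fout='merged.fits', verbose=True)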
Example #6
        (pyspeckit.spectrum.models.ammonia.freq_dict['fourfour']/1e9))**2 *
        1./cube44.header.get('BMAJ')/3600. * 1./cube44.header.get('BMIN')/3600. )
cube44.unit = "K"

# Compute an error map.  We use the 1-1 errors for all 3 because they're
# essentially the same, but you could use a different error map for each
# frequency
oneonemomentfn = 'hotclump_11.cube_r0.5_rerun.image.moment_linefree.fits'
errmap11 = (pyfits.getdata(oneonemomentfn).squeeze() * 13.6 *
            (300.0 /
             (pyspeckit.spectrum.models.ammonia.freq_dict['oneone']/1e9))**2
            * 1./cube11.header.get('BMAJ')/3600. *
            1./cube11.header.get('BMIN')/3600.)
# Interpolate errors across NaN pixels
errmap11[errmap11 != errmap11] = convolve_fft(errmap11,
                                              Gaussian2DKernel(3),
                                              nan_treatment='interpolate')[errmap11 != errmap11]

# Stack the cubes into one big cube.  The X-axis is no longer linear: there
# will be jumps from 1-1 to 2-2 to 4-4.  
cubes = pyspeckit.CubeStack([cube11,cube22,cube44], maskmap=mask)
cubes.unit = "K"

# Make a "moment map" to contain the initial guesses
# If you've already fit the cube, just re-load the saved version
# otherwise, re-fit it
if os.path.exists('hot_momentcube.fits'):
    momentcubefile = pyfits.open('hot_momentcube.fits')
    momentcube = momentcubefile[0].data
else:
    cube11.mapplot()
Example #7
def rtModel_spatialconv_rebin(infil, outfil,spatial_scale_xy=[0.679,0.290],zgal=0.6942,\
                              sm_fwhm_arcsec=1.634, outroot=None):
    '''Convolve radiative transfer model cube.  Originally from read_spec_output.read_fullfits_conv_rebin

    Inputs
    ------
    infil : str
        Unprocessed RT model fits file (the cube to convolve)

    outfil : str
        Filename of the output fits file

    outroot : str
        Filename (before extension) of output file
    '''

    kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(zgal).to(u.kpc /
                                                          u.arcsec).value
    sm_fwhm = sm_fwhm_arcsec * kpc_per_arcsec  # in kpc

    #sm_fwhm = 7.209      # in kpc  -- 1" arcsec at z=0.6942
    #sm_fwhm = 9.360      # in kpc  -- 1.277" arcsec at z=0.6942 (Planck15)
    #sm_fwhm = 11.977  # in kpc  -- 1.634" arcsec at z=0.6942 (Planck15)

    xobsbin = spatial_scale_xy[0] * kpc_per_arcsec  # in kpc -- about 0.7" at z=0.6942
    yobsbin = spatial_scale_xy[1] * kpc_per_arcsec  # in kpc -- about 0.3" at z=0.6942

    pad_factor = 1.0  # will use pad_factor * nx or pad_factor * ny to pad edges of the cube

    if infil is None:
        infil = 'spec.fits'

    if outroot is None:
        outroot = 'spec'

    ### Declare filename directly instead
    #outfil = outroot+'_conv_rebin.fits'

    hdu = fits.open(infil)
    data = hdu[0].data
    wave = hdu[1].data
    #dwv = np.abs(wave[1] - wave[0])
    x_arr = hdu[2].data  # physical distance
    dx = np.abs(x_arr[1] - x_arr[0])  # bin size in kpc

    ny, nx, nlam = data.shape

    # Make padded cube
    pad_x = int(pad_factor * nx)
    pad_y = int(pad_factor * ny)
    data_pad = np.pad(data, ((pad_y, pad_y), (pad_x, pad_x), (0, 0)),
                      mode='constant',
                      constant_values=0)
    pny, pnx, pnlam = data_pad.shape
    tot_extent = pnx * dx  # total extent of model plus pad in kpc

    # Remove continuum source
    #r_gal = 0.1      # extent of galaxy in kpc
    #mid_idx = int(pnx/2)
    #gal_sz = np.ceil(r_gal/dx)
    #data_pad[int((mid_idx-gal_sz)):int((mid_idx+gal_sz)),int((mid_idx-gal_sz)):int((mid_idx+gal_sz)),:] = 0.0

    # Define kernel
    pix_fwhm = sm_fwhm / dx
    pix_stddev = pix_fwhm / (2.0 * (2.0 * np.log(2.0))**0.5)
    kernel = Gaussian2DKernel(pix_stddev)

    # Do we need to trim before rebinning (probably)
    newshape_x = int(tot_extent / xobsbin)
    newshape_y = int(tot_extent / yobsbin)
    quot_x = pnx / newshape_x
    quot_y = pny / newshape_y
    if pnx % newshape_x != 0:
        xtrimpx = pnx % newshape_x
        xtr1 = int(xtrimpx / 2)
        slx = np.s_[xtr1:-(xtrimpx - xtr1)]
    else:
        slx = np.s_[:]
    if pny % newshape_y != 0:
        ytrimpx = pny % newshape_y
        ytr1 = int(ytrimpx / 2)
        sly = np.s_[ytr1:-(ytrimpx - ytr1)]
    else:
        sly = np.s_[:]
    data_pad = data_pad[sly, slx, :]
    rb_conv_cube = np.zeros((newshape_y, newshape_x, pnlam))

    # Convolve and rebin
    # New dimensions must divide old ones
    print("Dimension check -- ")
    print("Original array is ", pnx, " by ", pny)
    print("New shape is ", newshape_x, " by ", newshape_y)

    for ilam in range(pnlam):
        conv = convolve_fft(data_pad[:, :, ilam],
                            kernel,
                            normalize_kernel=True)
        rb_conv_cube[:, :, ilam] = utils.bin_ndarray(conv,
                                                     (newshape_y, newshape_x),
                                                     operation='sum')
        print("Convolving wavelength ", wave[ilam], "; index = ", ilam)

    print("Transposing cube to (y,x,lam)")
    rb_conv_cube = np.transpose(rb_conv_cube, axes=(1, 0, 2))

    # Write fits file
    hdulist = None
    arr_list = [data_pad, rb_conv_cube, wave, dx]

    for arr in arr_list:
        if hdulist is None:
            hdulist = fits.HDUList([fits.PrimaryHDU(arr)])
        else:
            hdulist.append(fits.ImageHDU(arr))

    hdulist.writeto(outfil, overwrite=True)
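A minimal call using the function's own defaults; 'spec.fits' is the default input name assumed above, and the output name is a placeholder:

# Convolve to 1.634" seeing and rebin to the default 0.679" x 0.290" spaxels.
rtModel_spatialconv_rebin('spec.fits', 'spec_conv_rebin.fits',
                          zgal=0.6942, sm_fwhm_arcsec=1.634)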
Example #8
def sim_sameField(location, mode='moffat', numIms=100, bkg_mag=22.5, fwhm_min=3, fwhm_max=6, 
                  rot_min=-2.5, rot_max=2.5, shift_min=-2, shift_max=2, scale_mult=(0,1.5),
                  scale_add=(-20,50), zero_point=25):
    '''Test **OASIS**'s ability to handle frame-by-frame variations in astronomical data and filter out false-positive sources. The procedure of the simulation is as follows:
        
        1. Copies a random science image from the specified dataset to the **simulations** directory.
        2. A source catalog of the chosen science image is made, containing information on each source's centroid location and total flux.
        3. Using this source catalog, simulations of the chosen science image are made, all with constant source flux and location, but with different backgrounds, seeing, and pointing.
        4. The set of simulated images are sent through the **OASIS Pipeline**.
        5. Low numbers of detected sources signifies a successful simulation. There are no variable objects in the simulated images, so ideally zero sources should be detected by **OASIS**.
        
        :param str location: Path of data file tree (contains the **configs**, **data**, **psf**, **residuals**, **sources**, **templates** directories). Use a comma-separated list for mapping to multiple datasets.
        :param str mode: Simulation mode. Method by which simulated images are made. All images are given a uniform background, then smeared according to Poisson statistics.
        
            * *moffat* (default): Sources are convolved with a 2D Moffat kernel.
            * *gauss*: Sources are convolved with a symmetric 2D Gaussian kernel.
            * *real*: The actual PSF model of the chosen science image is used as the convolution kernel.
            * *sky*: AstrOmatic program ``SkyMaker`` (Bertin) is used to make simulated images.
            
        :param int numIms: Number of simulated images to make (default 100).
        :param float bkg_mag: Average background level in mags. Actual simulated background levels are chosen to be a random value within the interval :math:`[bkg\_mag-1.5, bkg\_mag+1.5]`.
        :param float fwhm_min: Minimum FWHM of simulated images in pixels.
        :param float fwhm_max: Maximum FWHM of simulated images in pixels.
        :param float rot_min: Lower bound on angle of rotation in degrees.
        :param float rot_max: Upper bound on angle of rotation in degrees.
        :param float shift_min: Lower bound on (X,Y) shift in pixels.
        :param float shift_max: Upper bound on (X,Y) shift in pixels.
        :param tuple scale_mult: Interval of acceptable multiplicative scale factors.
        :param tuple scale_add: Interval of acceptable additive scale factors.
        :param float zero_point: Zero point magnitude.
        :returns: Standard **OASIS Pipeline** output, residual frames located in **residuals** and source catalogs located in **sources**.
        
    '''
    ref_im = glob.glob(location + '/data/*_ref_A_.fits')
    if not os.path.exists(location):
        print("-> Error: Problem with path name(s)-- make sure paths exist and are entered correctly\n-> Exiting...")
        sys.exit()
    if len(ref_im) != 1:
        print("-> Error: Problem with number of reference images\n-> Exiting...\n")
        sys.exit()
    ref_im = ref_im[0]
    ref_fwhm = fwhm(ref_im)
    path_splits = ref_im.split('/')
    image_name = path_splits[-1]
    sim_loc = location.replace('targets', 'simulations')
    len_loc = len(loc.split('/'))
    tar = path_splits[len_loc+2]
    copy_to_sim(tar, image=ref_im, mode='samefield')
    ref_psf = glob.glob("%s/psf/*_ref_A_.psf" % (sim_loc))
    if len(ref_psf) != 1:
        print("-> Error: Problem with number of reference PSF files\n-> Exiting...\n")
        sys.exit()
    try:
        clear_contents(sim_loc)
    except:
        pass
    images = glob.glob("%s/data/*.fits" % (sim_loc))
    ref_im_sim = ref_im.replace("targets", "simulations")
#delete all original images except reference
    for i in images:
        name = i.split('/')[-1]
        if name != image_name:
            os.remove(i)
#create configs directory if none exists
    create_configs(sim_loc)
#make source catalog of reference using SExtractor
    sim_config = "%s/configs/default_sim.sex" % (sim_loc)
    sim_params = "%s/configs/default_param_sim.sex" % (sim_loc)
    with open(sim_config, 'r') as conf:
        lines = conf.readlines()
    lines[6] = "CATALOG_NAME" + "        " + "%s/data/reference.cat" % (sim_loc) + "\n"
    lines[9] = "PARAMETERS_NAME" + "        " + sim_params + "\n"
    lines[22] = "FILTER_NAME" + "        " + "%s/configs/default.conv" % (sim_loc) + "\n"
    lines[70] = "SEEING_FWHM" + "        " + str(ref_fwhm) + "\n"
    lines[127] = "PSF_NAME" + "        " + ref_psf[0] + "\n"
    with open(sim_config, 'w') as conf_write:
        conf_write.writelines(lines)
    os.system("sextractor %s[0] -c %s" % (ref_im_sim, sim_config))
#extract x_pos, y_pos, and fluxes from SExtractor catalog
    ref_cat = "%s/data/reference.cat" % (sim_loc)
    with open(ref_cat, 'r') as cat:
        cat_lines = cat.readlines()
#get simulated image's metadata
    ref_hdu = fits.open(ref_im_sim)
    ref_data = ref_hdu[0].data
    ref_header = ref_hdu[0].header
    ref_mask = ref_hdu[1].data
    try:
        weight_check = fits.getval(ref_im_sim, 'WEIGHT')
    except Exception:
        weight_check = 'N'
    if weight_check == 'Y':
        ref_mask = (ref_mask-1)*-1
    ref_mask = ref_mask.astype(np.int64)
    ref_hdu.close()
    from astropy.stats import sigma_clipped_stats
    mean, median, std = sigma_clipped_stats(ref_data, sigma=3.0)
#extract simulated image's source information from SExtractor catalog
    x_pos = []
    y_pos = []
    flux = []
    sources = {}
    for c in cat_lines:
        splits = c.split()
        if splits[0] != '#':
            flux.append(float(splits[0]))
            x_pos.append(round(float(splits[3])))
            y_pos.append(round(float(splits[4])))
            sources.update({float(splits[0]) : (round(float(splits[3])), round(float(splits[4])))})
    flux_ordered = sorted(sources)
    flux_iter = round(len(flux)*0.99)
    flux_sim = flux_ordered[flux_iter]
    xy_sim = sources[flux_sim]
#if mode is set to use SkyMaker for making the simulations, configure SkyMaker
    if mode == 'sky':
        mags = []
        for f in flux:
            mags.append((28-(np.log(f))))
        with open("%s/configs/sky_list.txt" % (sim_loc), "w+") as sky_list:
            for i in range(len(flux)):
                sky_list.write("100 %.3f %.3f %.3f\n" % (x_pos[i], y_pos[i], mags[i]))
        #get pixel scale of reference image
        pixscale = float(ref_header['PIXSCALE'])
        #define oversampling
        oversample = pixscale*25
        #define sky.config location
        sky_config = "%s/configs/sky.config" % (sim_loc)
#start making fake images
    print("\n-> Making simulated images...")
    for n in tqdm(range(numIms)):
#define image name
        if n == 0:
            image_name = '%s/data/%d_ref_A_.fits' % (sim_loc, n)
        else:
            image_name = '%s/data/%d_N_.fits' % (sim_loc, n)
#for each image: make sources w/ random fwhm b/w (3,6), rotate/zoom, shift, add a different gaussian dist. of noise, change scale linearly, poisson smear
        #define FWHM of simulation
        image_fwhm = ((fwhm_max-fwhm_min) * np.random.random()) + fwhm_min
        #based on the mode chosen, create the corresponding convolution kernel and make simulated image
        if mode != 'sky':
            if mode == 'moffat':
                moffat_kernel_1 = Moffat2DKernel(gamma=make_stars.get_moffat_gamma(image_fwhm), alpha=7)
                moffat_kernel_2 = Moffat2DKernel(gamma=make_stars.get_moffat_gamma(image_fwhm), alpha=2)
                conv_kernel = (0.8*moffat_kernel_1) + (0.2*moffat_kernel_2)
            elif mode == 'gauss':
                gaussian_kernel_1 = Gaussian2DKernel(x_stddev=(image_fwhm/2.355), y_stddev=(image_fwhm/2.355))
                gaussian_kernel_2 = Gaussian2DKernel(x_stddev=((image_fwhm*2)/2.355), y_stddev=((image_fwhm*2)/2.355))
                conv_kernel = (0.9*gaussian_kernel_1) + (0.1*gaussian_kernel_2)
            elif mode == 'real':
                conv_kernel = get_first_model(ref_im)
            try:
                conv_kernel /= np.sum(conv_kernel)
            except:
                pass
            flux_variable = np.array(flux) * np.random.random() * 2
            image = make_stars.make_image(ref_data.shape[0], ref_data.shape[1], 
                                      x_loc=y_pos, y_loc=x_pos, fluxes=flux_variable, psf=[conv_kernel])
        #if mode is set to 'sky' use SkyMaker to make simulated image
        elif mode == 'sky':
            bkg_Mag = (1.5*np.random.random()) + bkg_mag
            image_fwhm_arcsec = image_fwhm*pixscale
            with open(sky_config, 'r') as sky:
                sky_lines = sky.readlines()
            sky_lines[6] = "IMAGE_NAME" + "        " + image_name + "\n"
            sky_lines[7] = "IMAGE_SIZE" + "        " + str("%d, %d" % (ref_data.shape[1], ref_data.shape[0])) + "\n"
            sky_lines[19] = "SATUR_LEVEL" + "        " + str(ref_header['SATURATE']) + "\n"
            sky_lines[21] = "EXPOSURE_TIME" + "        " + str(ref_header['EXPTIME']) + "\n"
            sky_lines[26] = "PIXEL_SIZE" + "        " + str(pixscale) + "\n"
            sky_lines[34] = "SEEING_FWHM" + "        " + str(image_fwhm_arcsec) + "\n"
            sky_lines[37] = "PSF_OVERSAMP" + "        " + str(oversample) + "\n"
            sky_lines[65] = "BACK_MAG" + "        " + str(bkg_Mag) + "\n"
            with open(sky_config, 'w') as sky:
                sky.writelines(sky_lines)
            os.system("sky %s/configs/sky_list.txt -c %s" % (sim_loc, sky_config))
            try:
                os.remove("%s/data/%s.list" % (sim_loc, image_name[:-5]))
            except:
                pass
            image = fits.getdata(image_name)
        else:
            print("-> Error: Please enter a valid mode (gauss, moffat, sky, real)\n-> Exiting...")
            sys.exit()
        #now we start the warping of each simulation
        #first rotate/zoom (angle is random b/w 0 and 30 degrees, zoom is random b/w 0 and 2)
        if n != 0:
            #define initial mask for each simulation
            Mask = np.zeros(image.shape)
            rot_angle = ((rot_max-rot_min)*np.random.random())+rot_min
            dx = ((shift_max-shift_min) * np.random.random()) + shift_min
            dy = ((shift_max-shift_min) * np.random.random()) + shift_min
            image = rotate(image, rot_angle, reshape=False)
            image = shift(image, [dx,dy])
            Mask = rotate(ref_mask, rot_angle, reshape=False, cval=1)
            Mask = shift(Mask, [dx,dy], cval=1)
        else:
            Mask = ref_mask
        #for non-SkyMaker simulations, add in a random background, poisson smear the image, and rescale it
        if mode != 'sky':
            #add constant background
            bkg_loc = 2.512**(zero_point - bkg_mag)
            bkg_scl = ((std+5)-(std-5))*np.random.random()+(std-5)
            bkg = np.random.normal(loc=bkg_loc, scale=bkg_scl, size=image.shape)
            image = np.add(image, bkg)
            #poisson smear
            negative_image = np.zeros(image.shape)
            negative_image[:] = image[:]
            image[image < 0] = 0
            negative_image[negative_image > 0] = 0
            image = np.random.poisson(image)
            image = image.astype(np.float64)
            negative_image *= -1
            negative_image = np.random.poisson(negative_image)
            negative_image = negative_image.astype(np.float64)
            negative_image *= -1
            image += negative_image
            #rescale image linearly
            a = ((scale_mult[1] - scale_mult[0])*np.random.random()) + scale_mult[0]
            b = ((scale_add[1] - scale_add[0])*np.random.random()) + scale_add[0]
            image *= a
            image += b
        #write new image to data folder in target's simulations folder
        newHDUData = fits.PrimaryHDU(image, header=ref_header)
        newHDUMask = fits.ImageHDU(Mask)
        newHDUList = fits.HDUList([newHDUData, newHDUMask])
        newHDUList.writeto(image_name, overwrite=True)
        newHDU = fits.open(image_name, mode='update')
        (newHDU[0].header).set('WEIGHT', 'N')
        (newHDU[0].header).set('SCALED', 'N')
        newHDU.close()
    os.system("mv %s %s" % (ref_im_sim, sim_loc))
    os.system("mv %s %s" % (ref_psf, sim_loc))
    os.system("mv %s %s.cat" % (ref_psf[:-4], sim_loc))
    if mode == 'sky':
        sim_lists = glob.glob("%s/data/*.list" % (sim_loc))
        for sl in sim_lists:
            os.remove(sl)
    pipeline.pipeline_run_sim(sim_loc, sim=False)
    print(flux_iter, flux_sim, xy_sim)
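A hypothetical invocation; the dataset path is a placeholder for the targets/ tree the docstring describes:

# 50 simulated frames of one field with Gaussian PSFs and a fainter background.
sim_sameField('/path/to/OASIS/targets/my_target/exptime',  # hypothetical path
              mode='gauss', numIms=50, bkg_mag=23.0)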
Example #9
    def internal(fig, a, sim, times, unit_values):
        from plutokore import radio
        from plutokore.plot import create_colorbar
        from numba import jit
        from astropy.convolution import convolve, Gaussian2DKernel
        from astropy.cosmology import Planck15 as cosmo

        @jit(nopython=True)
        def raytrace_surface_brightness(r, θ, x, y, z, raytraced_values, original_values):
            φ = 0
            rmax = np.max(r)
            θmax = np.max(θ)
            x_half_step = (x[1] - x[0]) * 0.5
            pi2_recip = (1 / (2 * np.pi))

            visited = np.zeros(original_values.shape)
            for x_index in range(len(x)):
                for z_index in range(len(z)):
                    visited[:,:] = 0
                    for y_index in range(len(y)):
                        # Calculate the coordinates of this point
                        ri = np.sqrt(x[x_index] **2 + y[y_index] ** 2 + z[z_index] ** 2)
                        if ri == 0:
                            continue
                        if ri > rmax:
                            continue
                        θi = np.arccos(z[z_index] / ri)
                        if θi > θmax:
                            continue
                        φi = 0 # Don't care about φi!!

                        chord_length = np.abs(np.arctan2(y[y_index], x[x_index] + x_half_step) - np.arctan2(y[y_index], x[x_index] - x_half_step))

                        # Now find index in r and θ arrays corresponding to this point
                        r_index = np.argmax(r>ri)
                        θ_index = np.argmax(θ>θi)
                        # Only add this if we have not already visited this cell (twice)
                        if visited[r_index, θ_index] <= 1:
                            raytraced_values[x_index, z_index] += original_values[r_index, θ_index] * chord_length * pi2_recip
                            visited[r_index, θ_index] += 1
            #return raytraced_values
            return

        redshift=0.1
        beamsize=5 * u.arcsec
        showbeam=True
        xlim=(-15, 15)
        ylim=(-30, 30)
        xticks=None
        pixel_size=1.8 * u.arcsec
        beam_x=0.8
        beam_y=0.8
        png=False
        contours=True
        should_convolve=True
        half_plane=False
        vmin=-3.0
        vmax=2.0
        style='flux-plot.mplstyle'
        no_labels=False
        with_hist=True
        trc_cutoff = 0.001

        output = np.where(times >= comp_time)[0][0]

        # calculate beam radius
        sigma_beam = (beamsize / 2.355)

        # calculate kpc per arcsec
        kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(redshift).to(u.kpc / u.arcsec)

        # load timestep data file
        d = pk.simulations.load_timestep_data(output, sim)

        X1, X2 = pk.simulations.sphericaltocartesian(d)
        X1 = X1 * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value
        X2 = X2 * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value

        l = radio.get_luminosity(d, unit_values, redshift, beamsize)
        f = radio.get_flux_density(l, redshift).to(u.Jy).value
        #sb = radio.get_surface_brightness(f, d, unit_values, redshift, beamsize).to(u.Jy)

        xmax = (((xlim[1] * u.arcsec + pixel_size) * kpc_per_arcsec) / unit_values.length).si
        xstep = (pixel_size * kpc_per_arcsec / unit_values.length).si
        zmax = (((ylim[1] * u.arcsec + pixel_size) * kpc_per_arcsec) / unit_values.length).si
        zstep = (pixel_size * kpc_per_arcsec / unit_values.length).si
        ymax = max(xmax, zmax)
        ystep = min(xstep, zstep)
        ystep = 0.5  # NB: hard-coded override of the computed step above

        if half_plane:
            x = np.arange(-xmax, xmax, xstep)
            z = np.arange(-zmax, zmax, zstep)
        else:
            x = np.arange(0, xmax, xstep)
            z = np.arange(0, zmax, zstep)
        y = np.arange(-ymax, ymax, ystep)
        raytraced_flux = np.zeros((x.shape[0], z.shape[0]))

        # print(f'xlim in arcsec is {xlim[1]}, xlim in code units is {xlim[1] * u.arcsec * kpc_per_arcsec / unit_values.length}')
        # print(f'zlim in arcsec is {ylim[1]}, zlim in code units is {ylim[1] * u.arcsec * kpc_per_arcsec / unit_values.length}')
        # print(f'xmax is {xmax}, ymax is {ymax}, zmax is {zmax}')
        # print(f'x shape is {x.shape}; y shape is {y.shape}; z shape is {z.shape}')

        raytrace_surface_brightness(
            r=d.x1,
            θ=d.x2,
            x=x,
            y=y,
            z=z,
            original_values=f,
            raytraced_values=raytraced_flux
        )

        raytraced_flux = raytraced_flux * u.Jy

        # beam information
        sigma_beam_arcsec = beamsize / 2.355
        area_beam_kpc2 = (np.pi * (sigma_beam_arcsec * kpc_per_arcsec)
                          **2).to(u.kpc**2)
        beams_per_cell = (((pixel_size * kpc_per_arcsec) ** 2) / area_beam_kpc2).si
        #beams_per_cell = (area_beam_kpc2 / ((pixel_size * kpc_per_arcsec) ** 2)).si

        # radio_cell_areas = np.full(raytraced_flux.shape, xstep * zstep) * (unit_values.length ** 2)

        # n beams per cell
        #n_beams_per_cell = (radio_cell_areas / area_beam_kpc2).si

        raytraced_flux /= beams_per_cell

        # Kernel width must be given in pixels of the raytraced map.
        stddev = (sigma_beam_arcsec / pixel_size).si.value
        beam_kernel = Gaussian2DKernel(stddev)
        if should_convolve:
            flux = convolve(raytraced_flux.to(u.Jy).value, beam_kernel, boundary='extend') * u.Jy
        else:
            flux = raytraced_flux.to(u.Jy)
        #flux = radio.convolve_surface_brightness(raytraced_flux, unit_values, redshift, beamsize)
        #flux = raytraced_flux

        X1 = x * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value
        X2 = z * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value

        # plot data
        a.set_xlim(xlim)
        a.set_ylim(ylim)

        contour_color = 'k'
        contour_linewidth = 0.33
        # contour_levels = [-3, -1, 1, 2]
        contour_levels = [-2, -1, 0, 1, 2] # Contours start at 10 μJy

        im = a.pcolormesh(
            X1,
            X2,
            np.log10(flux.to(u.mJy).value).T,
            shading='auto',
            edgecolors='face',
            rasterized=True,
            vmin=vmin,
            vmax=vmax)
        if contours:
            a.contour(X1, X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

        im = a.pcolormesh(
            -X1,
            X2,
            np.log10(flux.to(u.mJy).value).T,
            shading='auto',
            vmin=vmin,
            vmax=vmax)
        im.set_rasterized(True)
        if contours:
            a.contour(-X1, X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

        if not half_plane:
            im = a.pcolormesh(
                X1,
                -X2,
                np.log10(flux.to(u.mJy).value).T,
                shading='auto',
                vmin=vmin,
                vmax=vmax)
            im.set_rasterized(True)
            if contours:
                a.contour(X1, -X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

            im = a.pcolormesh(
                -X1,
                -X2,
                np.log10(flux.to(u.mJy).value).T,
                shading='auto',
                vmin=vmin,
                vmax=vmax)
            im.set_rasterized(True)
            if contours:
                a.contour(-X1, -X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

        (ca, div, cax) = create_colorbar(
            im, a, fig, position='right', padding=0.5)
        ca.set_label(r'$\log_{10}\mathrm{mJy / beam}$')

        circ = plot.Circle(
            (xlim[1] * beam_x, ylim[0] * beam_y),
            color='w',
            fill=True,
            radius=sigma_beam.to(u.arcsec).value,
            alpha=0.7)
        a.add_artist(circ)
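A standalone check of the beam bookkeeping above, re-deriving beams_per_cell from the same hard-coded beamsize, pixel_size, and redshift (only the printout is new):

import numpy as np
import astropy.units as u
from astropy.cosmology import Planck15 as cosmo

beamsize = 5 * u.arcsec
pixel_size = 1.8 * u.arcsec
kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(0.1).to(u.kpc / u.arcsec)

sigma_beam = beamsize / 2.355   # FWHM -> Gaussian sigma
area_beam_kpc2 = (np.pi * (sigma_beam * kpc_per_arcsec)**2).to(u.kpc**2)
beams_per_cell = (((pixel_size * kpc_per_arcsec)**2) / area_beam_kpc2).si
print(beams_per_cell)           # dimensionless, roughly 0.23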
Example #10
def find_skycomponents(im: Image,
                       fwhm=1.0,
                       threshold=10.0,
                       npixels=5) -> List[Skycomponent]:
    """ Find gaussian components in Image above a certain threshold as Skycomponent

    :param fwhm: Full width half maximum of gaussian
    :param threshold: Threshold for component detection. Default: 10 standard deviations over median.
    :param im: Image to be searched
    :param params:
    :return: list of sky components
    """

    assert isinstance(im, Image)
    log.info("find_skycomponents: Finding components in Image by segmentation")

    # We use photutils segmentation - this first segments the image
    # into pieces that are thought to contain individual sources, then
    # identifies the concrete source properties. Having these two
    # steps makes it straightforward to extract polarisation and
    # spectral information.

    # Make filter kernel
    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma,
                              x_size=int(1.5 * fwhm),
                              y_size=int(1.5 * fwhm))
    kernel.normalize()

    # Segment the average over all channels of Stokes I
    image_sum = numpy.sum(im.data, axis=(0))[0, ...] / float(im.shape[0])
    segments = segmentation.detect_sources(image_sum,
                                           threshold,
                                           npixels=npixels,
                                           filter_kernel=kernel)
    log.info("find_skycomponents: Identified %d segments" % segments.nlabels)

    # Now get source properties for all polarisations and frequencies
    comp_tbl = [[
        segmentation.source_properties(im.data[chan, pol],
                                       segments,
                                       filter_kernel=kernel,
                                       wcs=im.wcs) for pol in [0]
    ] for chan in range(im.nchan)]

    def comp_prop(comp, prop_name):
        return [[comp_tbl[chan][pol][comp][prop_name] for pol in [0]]
                for chan in range(im.nchan)]

    # Generate components
    comps = []
    for segment in range(segments.nlabels):

        # Get flux and position. Astropy's quantities make this
        # unnecessarily complicated.
        flux = numpy.array(comp_prop(segment, "max_value"))
        # These values seem inconsistent with the xcentroid, and ycentroid values
        # ras = u.Quantity(list(map(u.Quantity,
        #         comp_prop(segment, "ra_icrs_centroid"))))
        # decs = u.Quantity(list(map(u.Quantity,
        #         comp_prop(segment, "dec_icrs_centroid"))))
        xs = u.Quantity(list(map(u.Quantity, comp_prop(segment, "xcentroid"))))
        ys = u.Quantity(list(map(u.Quantity, comp_prop(segment, "ycentroid"))))

        sc = pixel_to_skycoord(xs, ys, im.wcs, 1)
        ras = sc.ra
        decs = sc.dec

        # Remove NaNs from RA/DEC (happens if there is no flux in that
        # polarsiation/channel)
        # ras[numpy.isnan(ras)] = 0.0
        # decs[numpy.isnan(decs)] = 0.0

        # Determine "true" position by weighting
        flux_sum = numpy.sum(flux)
        ra = numpy.sum(flux * ras) / flux_sum
        dec = numpy.sum(flux * decs) / flux_sum
        xs = numpy.sum(flux * xs) / flux_sum
        ys = numpy.sum(flux * ys) / flux_sum

        point_flux = im.data[:, :,
                             numpy.round(ys.value).astype('int'),
                             numpy.round(xs.value).astype('int')]

        # Add component
        comps.append(
            Skycomponent(
                direction=SkyCoord(ra=ra, dec=dec),
                frequency=im.frequency,
                name="Segment %d" % segment,
                flux=point_flux,
                shape='Point',
                polarisation_frame=im.polarisation_frame,
                params={
                    'xpixel': xs,
                    'ypixel': ys,
                    'sum_flux': flux
                }  # Table has lots of data, could add more in
                # future
            ))

    return comps
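Hedged usage, assuming an ARL-style Image object is already in memory as im:

comps = find_skycomponents(im, fwhm=1.0, threshold=10.0, npixels=5)
for comp in comps:
    print(comp.name, comp.direction, comp.flux.shape)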
Example #11
def sim_fakes(location, n_fakes=20, iterations=50, input_mode='flux', PSF='moffat', subtract_method='ois', f_min=0, f_max=40000):
    '''Simulates transient signals (fakes) and tests **OASIS**'s ability to detect them. The procedure of the simulation is as follows:
        
            1. Makes a copy of the specified data set and moves it to the **simulations** directory.
            2. Chooses a random image out of the data set and adds in fakes.
            3. Runs the data set through the **OASIS Pipeline**.
            4. Outputs a catalog of all fakes and whether or not they were detected.
            5. Simulation is repeated with a different set of fakes.
            
            :param str location: Path of data file tree (contains the **configs**, **data**, **psf**, **residuals**, **sources**, **templates** directories). Use a comma-separated list for mapping to multiple datasets.
            :param int n_fakes: Number of fakes added to the chosen image.
            :param int iterations: Number of iterations the simulation goes through. The total number of fakes added is then :math:`n\_fakes * iterations`. It is recommended to choose *n_fakes* and *iterations* such that the total number of fakes is high, at least a few hundred, ideally more than 1000.
            :param str input_mode: How to interpret fake's flux parameter.
            
                * *flux* (default): Fake's brightness is assumed to be total flux of the fake in ADU and is determined by *f_min* and *f_max* parameters.
                * *mag*: Fake's brightness is given in magnitudes instead of ADU flux. *f_min* and *f_max* are then assumed to be apparent magnitudes rather than ADU counts.
            
            :param str PSF: Type of PSF model used for fake construction. See documentation for details.
            
                * *moffat* (default): Fakes are convolved with a 2D Moffat kernel.
                * *gaussian*: Fakes are convolved with a symmetric 2D Gaussian kernel.
                
            :param str subtract_method: Subtraction method used, can be either *ois* or *hotpants*, default is *ois*. See ``subtract`` method's documentation for details.
            :param float f_min: Minimum flux for fakes. Assumed to either be given in ADU counts or apparent magnitudes depending on *input_mode*.
            :param float f_max: Maximum flux for fakes. Assumed to either be given in ADU counts or apparent magnitudes depending on *input_mode*.
            :returns: Catalog of all fakes, the image they were added to, iteration, and whether or not they were detected. See documentation for details.
    
    '''
    try:
        #pick a random image in location dataset to use as the fake
        dataImages = glob.glob(location + '/data/*.fits')
        imIndex = np.random.randint(low=0, high=len(dataImages))
        image = dataImages[imIndex]
        tarIndex = len(loc.split('/'))
        target = location.split('/')[tarIndex+2]
        #copy data to simulations directory
        check = loc + '/OASIS/simulations/' + target
        if os.path.exists(check) == False:
            copy_to_sim(target)
        #rename image paths to correspond to simulations directory
        image = image.replace('targets', 'simulations')
        image_name = image.split('/')[-1]
        length = len(image_name) + 6
        location = image[:-length]
        location = location.replace(target, "%s_fakes" % (target))
        image = image.replace(target, "%s_fakes" % (target))
    #    #copy image to be faked to exposure time directory so it can be retrieved
    #    os.system('cp %s %s' % (image, location))
        #define location of simulation results files
        fake_txt = location + '/results_fake.txt'
        MR_txt = location + '/results_MR.txt'
        #get PSF FWHM of original input image
        if not os.path.exists(image.replace('data','psf')[:-4]+'cat'):
            sex.sextractor_psf_sim(location, image)
            psfex(location)    
        FWHM = fwhm(image)
        #get input image data and header
        image_hdu = fits.open(image)
        image_header = image_hdu[0].header
        image_data = image_hdu[0].data
        image_mask = image_hdu[1].data
        image_hdu.close()
        shape = image_data.shape
        #move fake image to configs directory
        os.system("mv %s %s/configs" % (image, location))
        #redefine location of image
        image_new_loc = image.replace('data', 'configs')
        #convert input mags to fluxes
        if input_mode == 'mag':
            f_min = mag_to_flux(image, f_min)
            f_max = mag_to_flux(image, f_max)
        fake_name = image
        #perform simulation for 'iterations' number of loops
        for i in tqdm(range(iterations)):
            #define blank results slates
            fake_results = []
            MR_results = []
            #delete all previous simulation data
            clear_image(image)
            #make 'n_fakes' fluxes
            print("-> Creating fakes...")
            flux_scales = np.random.random(n_fakes)
            flux = ((f_max-f_min)*flux_scales) + f_min
            x = [round(shape[0]*np.random.random()) for i in range(n_fakes)]
            y = [round(shape[1]*np.random.random()) for j in range(n_fakes)]
            #print fake sources' info
            print("-> Fake fluxes: \n" + "-> " + str(flux))
            print("-> Fake x: \n" + "-> " + str(x))
            print("-> Fake y: \n" + "-> " + str(y))
            print("-> Fake PSF: %s" % (PSF))
            print("-> Fake FWHM: %.3f\n" % (FWHM))
            if PSF == 'gaussian':
                #make fake image with Gaussian profile
                print("-> Gaussian smearing fakes...")
                gaussian_kernel_1 = Gaussian2DKernel(x_stddev=(FWHM/2.355), y_stddev=(FWHM/2.355))
                gaussian_kernel_2 = Gaussian2DKernel(x_stddev=((FWHM*2)/2.355), y_stddev=((FWHM*2)/2.355))
                conv_kernel = (0.9*gaussian_kernel_1) + (0.1*gaussian_kernel_2)
                fake = make_stars.make_image(shape[0], shape[1], x_loc=x, y_loc=y, fluxes=flux, psf=[conv_kernel])
            elif PSF == 'moffat':
                print("-> Moffat smearing fakes...")
                #define Moffat convolution kernel
                conv_kernel = Moffat2DKernel(gamma=make_stars.get_moffat_gamma(FWHM), alpha=4.765)
                #make image using fluxes and positions defined earlier, then convolve with above kernel
                fake = make_stars.make_image(shape[0], shape[1], x_loc=x, y_loc=y, fluxes=flux, psf=[conv_kernel])
            #add fake to original image and overwrite the OG fits file
            print("-> Adding fake to original image...")
            fake += image_data
            hduData = fits.PrimaryHDU(fake, header=image_header)
            hduMask = fits.ImageHDU(image_mask)
            hduList = fits.HDUList([hduData, hduMask])
            hduList.writeto(fake_name, overwrite=True)
            #run images through pipeline
            subtract.subtract_run(location, method=subtract_method)
            #run SExtractor on only fake image
            sex.sextractor_sim(fake_name.replace('_N_', '_A_'))
            #run SExtractor also on master residual to look for fakes
            sex.sextractor_MR(location)
            #find any fakes that were detected by SExtractor in fake catalog
            with open(location+'/sources/filtered_sources.txt', 'r') as src:
                detects = src.readlines()
            for n in range(n_fakes):
                found = 0
                for d in detects:
                    try:
                        float(d.split()[0])
                        if (y[n]-2)<float(d.split()[2])<(y[n]+2) and (x[n]-2)<float(d.split()[3])<(x[n]+2):
                            found += 1
                    except (ValueError, IndexError):
                        #skip header or malformed catalog lines
                        pass
                fake_results.append([i,image_name,x[n],y[n],flux[n],found])
            #write simulation results to fake_results.txt file
            with open(fake_txt, 'a+') as sim_data:
                sim_data.writelines(tabulate(fake_results))
            #find any fakes that were detected by SExtractor in MR catalog
            with open(location+'/sources/MR_sources_filtered.txt', 'r') as src:
                detects = src.readlines()
            for n in range(n_fakes):
                found = 0
                for d in detects:
                    try: 
                        float(d.split()[0])
                        if (y[n]-2)<float(d.split()[2])<(y[n]+2) and (x[n]-2)<float(d.split()[3])<(x[n]+2):
                            found += 1
                    except (ValueError, IndexError):
                        #skip header or malformed catalog lines
                        pass
                MR_results.append([i,image_name,x[n],y[n],flux[n],found])
            #write simulation results to MR_results.txt file
            with open(MR_txt, 'a+') as sim_data:
                sim_data.writelines(tabulate(MR_results))
        #move fake image from configs back to data directory
        os.system("mv %s %s/data" % (image_new_loc, location))
    except KeyboardInterrupt:
        print('\n-> Interrupted-- Exiting..')
        try:
            clear_image(image)
            os.system("mv %s %s/data" % (image_new_loc, location))
            sys.exit(0)
        except SystemExit:
            os._exit(0)
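A minimal sketch of the injection step above, assuming only numpy and astropy: delta-function sources at random positions are convolved with a Moffat kernel to mimic the PSF. The helper make_stars.make_image is not shown in this example, so inject_fakes and its gamma default are hypothetical stand-ins.

import numpy as np
from astropy.convolution import Moffat2DKernel, convolve_fft

def inject_fakes(image, n_fakes=20, f_min=0.0, f_max=40000.0, gamma=3.0, alpha=4.765, rng=None):
    """Return a copy of image with n_fakes Moffat-profile point sources added."""
    rng = np.random.default_rng() if rng is None else rng
    ny, nx = image.shape
    fluxes = rng.uniform(f_min, f_max, n_fakes)  #uniform flux draw, as in sim_fakes
    ys = rng.integers(0, ny, n_fakes)
    xs = rng.integers(0, nx, n_fakes)
    fakes = np.zeros_like(image, dtype=float)
    np.add.at(fakes, (ys, xs), fluxes)           #delta functions (handles repeated positions)
    kernel = Moffat2DKernel(gamma=gamma, alpha=alpha)
    kernel.normalize()                           #conserve total injected flux
    return image + convolve_fft(fakes, kernel)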
Example #12
def plot_beam(theta_fwhm,
              beam_unit,
              boxsize,
              ngrid,
              nproj,
              halocat_file,
              halo_redshift,
              line_name='CII',
              halo_cutoff_mass=1e11,
              use_scatter=True,
              halocat_file_type='npz',
              unit='degree',
              plot_unit='minute',
              tick_num=5,
              add_noise=False,
              random_noise_percentage=None):

    global final_conv
    #mtd=p.default_constants['minute_to_degree']
    dtm = p.degree_to_minute

    xl, yl, lum = calc_luminosity(boxsize,
                                  ngrid,
                                  nproj,
                                  halocat_file,
                                  halo_redshift,
                                  line_name=line_name,
                                  halo_cutoff_mass=halo_cutoff_mass,
                                  halocat_file_type=halocat_file_type,
                                  use_scatter=use_scatter,
                                  unit=unit)

    if beam_unit in ('arcmin', 'arcminute', 'minute'):
        print("Theta FWHM (arc-min):", theta_fwhm)
        theta = theta_fwhm
    elif beam_unit in ('second', 'arcsecond', 'arcsec'):
        print("Theta FWHM (arc-second):", theta_fwhm)
        theta = theta_fwhm / 60.0

    #luminosity_max=lum.max()
    x_arc = xl * dtm
    y_arc = yl * dtm

    sx = 2e-3 * np.log10(lum)**3  #keep it 0.03*(lum**3)
    sy = 2e-3 * np.log10(lum)**3  #keep it 0.03*(lum**3)

    #sx=np.zeros(len(lum))
    #sy=np.zeros(len(lum))

    beam_std = theta / (np.sqrt(8 * np.log(2.0)))
    gauss_kernel = Gaussian2DKernel(beam_std)

    x_p = np.linspace(0, x_arc.max(), num=ngrid)
    y_p = np.linspace(0, y_arc.max(), num=ngrid)

    x_p, y_p = np.meshgrid(x_p, y_p)

    def beam(amp, x, y, sx, sy):
        gauss_b = Gaussian2D(amp, x, y, sx, sy)
        return gauss_b

    def beam_conv(amp, x, y, sx, sy, x_p, y_p, kernel=gauss_kernel):
        gauss = Gaussian2D(amp, x, y, sx, sy)
        gauss_data = gauss(x_p, y_p)
        smoothed_data = convolve(gauss_data, gauss_kernel)
        return smoothed_data

    flen = len(x_p)
    lum_len = len(lum)
    if not add_noise and random_noise_percentage is None:
        final_conv = 0.001 * np.ones([flen, flen])
    if add_noise:
        #final_conv=random_noise_percentage *lum.max()* (np.random.rand(flen, flen)-0.5)
        final_conv = 0.01 * np.ones([flen, flen])
    for i in range(lum_len):
        b = beam(lum[i], x_arc[i], y_arc[i], sx[i], sy[i])
        gauss_data = b(x_p, y_p)
        if not add_noise and random_noise_percentage is None:
            final_conv = gauss_data + final_conv
        if add_noise:
            #final_conv=gauss_data+final_conv+random_noise_percentage *luminosity_max* (np.random.rand(flen, flen)-0.5)
            final_conv = gauss_data + final_conv + random_noise_percentage * (
                np.random.rand(flen, flen) - 0.5)
        final_conv = convolve(final_conv, gauss_kernel)

    fig, ax = plt.subplots(figsize=(7, 7), dpi=100)

    res = ax.imshow(final_conv,
                    cmap='gist_heat',
                    interpolation='gaussian',
                    origin='lower',
                    rasterized=True,
                    alpha=0.9,
                    vmin=1e3,
                    vmax=1e9,
                    norm=colors.LogNorm())

    if (plot_unit == 'degree'):
        x_tick = (lu.comoving_boxsize_to_degree(halo_redshift, boxsize))
        cell_size = x_tick / ngrid
        ticks = np.linspace(0, x_tick, num=tick_num)
        labels = [str("{:.1f}".format(xx)) for xx in ticks]
        locs = [xx / cell_size for xx in ticks]
        plt.xlabel(r'$\Theta\,(\mathrm{degree})$')
        plt.ylabel(r'$\Theta\,(\mathrm{degree})$')

    if (plot_unit == 'minute'):
        x_tick = (dtm * lu.comoving_boxsize_to_degree(halo_redshift, boxsize))
        cell_size = x_tick / ngrid
        ticks = np.linspace(0, x_tick, num=tick_num)
        labels = [str("{:.1f}".format(xx)) for xx in ticks]
        locs = [xx / cell_size for xx in ticks]
        plt.xlabel(r'$\Theta\,(\mathrm{arc-min})$')
        plt.ylabel(r'$\Theta\,(\mathrm{arc-min})$')

    plt.xticks(locs, labels)
    plt.yticks(locs, labels)

    #title = '$z={:g}$'.format(halo_redshift)
    #plt.title(title, fontsize=18)

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", "5%", pad="3%")
    cb = plt.colorbar(res, cax=cax)
    cb.set_label(r'$L$', labelpad=20)
    cb.solids.set_edgecolor("face")
    cb.ax.tick_params('both',
                      which='major',
                      length=3,
                      width=1,
                      direction='out')

    plt.tight_layout()
    plt.savefig("luminsoty_beam.png")
Example #13
            print('Writing sky subtracted file')
            if os.path.isfile(im_skysub_file): os.remove(im_skysub_file)
            pyfits.writeto(im_skysub_file, skysub_data, header=im_h)
            sys.exit()  #NB: everything below this line never runs

            shared_im[:] = skysub_data

        if os.path.isfile(im_convol_seg_file) is False or do_overwrite is True:

            print "Generating kernel to mask small objects"
            print 'Kernel_size: ', kernel_small_size, ' - Kernel_sigma: ', kernel_small_sigma
            tic = time.time()
            kernel_data = np.asarray(
                Gaussian2DKernel(kernel_small_sigma,
                                 x_size=kernel_small_size[0],
                                 y_size=kernel_small_size[1],
                                 mode='integrate'))
            kernel_size = kernel_data.shape

            shared_kernel_base = multiprocessing.Array(
                ctypes.c_float, kernel_size[0] * kernel_size[1])
            shared_kernel = np.ctypeslib.as_array(shared_kernel_base.get_obj())
            shared_kernel = shared_kernel.reshape(kernel_size[0],
                                                  kernel_size[1])
            shared_kernel[:] = kernel_data

            work_queue = multiprocessing.Queue()
            grid_n = np.asarray(grid_n)
            grid_mesh = np.ceil(im_size * 1. / grid_n).astype(int)
            grid_x = np.append(np.arange(0, im_size[0], grid_mesh[0]),
                               im_size[0])
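A self-contained sketch of the shared-array pattern this fragment relies on: wrap a multiprocessing.Array in a numpy view so worker processes can read the kernel without pickling copies. The kernel width and sizes here are arbitrary.

import ctypes
import multiprocessing
import numpy as np
from astropy.convolution import Gaussian2DKernel

kernel_data = np.asarray(Gaussian2DKernel(2.0, x_size=15, y_size=15, mode='integrate'))
shared_base = multiprocessing.Array(ctypes.c_float, kernel_data.size)
shared_kernel = np.ctypeslib.as_array(shared_base.get_obj()).reshape(kernel_data.shape)
shared_kernel[:] = kernel_data  #writes go through to the shared buffer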
Example #14
def master_synthetic_image_creator(target, seeing=''):
    def mimir_source_finder(image_path, sigma_above_bg, fwhm):
        """Find sources in Mimir images."""

        np.seterr(all='ignore')  #Ignore invalids (i.e. divide by zeros)

        #Find stars in the master image.
        avg, med, stddev = sigma_clipped_stats(
            image, sigma=3.0, maxiters=3)  #Previously maxiters = 5!
        daofind = DAOStarFinder(fwhm=fwhm,
                                threshold=sigma_above_bg * stddev,
                                sky=med,
                                ratio=0.8)
        new_sources = daofind(image)
        x_centroids = new_sources['xcentroid']
        y_centroids = new_sources['ycentroid']
        sharpness = new_sources['sharpness']
        fluxes = new_sources['flux']
        peaks = new_sources['peak']

        #Cut sources that are found within 20 pix of the edges.
        use_x = np.where((x_centroids > 20) & (x_centroids < 1004))[0]
        x_centroids = x_centroids[use_x]
        y_centroids = y_centroids[use_x]
        sharpness = sharpness[use_x]
        fluxes = fluxes[use_x]
        peaks = peaks[use_x]
        use_y = np.where((y_centroids > 20) & (y_centroids < 1004))[0]
        x_centroids = x_centroids[use_y]
        y_centroids = y_centroids[use_y]
        sharpness = sharpness[use_y]
        fluxes = fluxes[use_y]
        peaks = peaks[use_y]

        #Also cut using sharpness, this seems to eliminate a lot of false detections.
        use_sharp = np.where(sharpness > 0.5)[0]
        x_centroids = x_centroids[use_sharp]
        y_centroids = y_centroids[use_sharp]
        sharpness = sharpness[use_sharp]
        fluxes = fluxes[use_sharp]
        peaks = peaks[use_sharp]

        #Cut sources in the lower left, if bars are present.
        #use_ll =  np.where((x_centroids > 512) | (y_centroids > 512))
        #x_centroids  = x_centroids [use_ll]
        #y_centroids  = y_centroids [use_ll]
        #sharpness = sharpness[use_ll]
        #fluxes = fluxes[use_ll]
        #peaks = peaks[use_ll]

        #Cut targets whose y centroids are near y = 512. These are usually bad.
        use_512 = np.where(
            np.logical_or((y_centroids < 509), (y_centroids > 515)))[0]
        x_centroids = x_centroids[use_512]
        y_centroids = y_centroids[use_512]
        sharpness = sharpness[use_512]
        fluxes = fluxes[use_512]
        peaks = peaks[use_512]

        #Cut sources with negative/saturated peaks
        use_peaks = np.where((peaks > 30) & (peaks < 7000))[0]
        x_centroids = x_centroids[use_peaks]
        y_centroids = y_centroids[use_peaks]
        sharpness = sharpness[use_peaks]
        fluxes = fluxes[use_peaks]
        peaks = peaks[use_peaks]

        #Do quick photometry on the remaining sources.
        positions = [(x_centroids[i], y_centroids[i])
                     for i in range(len(x_centroids))]
        apertures = CircularAperture(positions, r=4)
        phot_table = aperture_photometry(image - med, apertures)

        #Cut based on brightness.
        phot_table.sort('aperture_sum')
        cutoff = 1 * std * np.pi * 4**2
        bad_source_locs = np.where(phot_table['aperture_sum'] < cutoff)
        phot_table.remove_rows(bad_source_locs)

        if len(phot_table) > 15:
            x_centroids = phot_table['xcenter'].value[-16:-1]
            y_centroids = phot_table['ycenter'].value[-16:-1]
        else:
            x_centroids = phot_table['xcenter'].value
            y_centroids = phot_table['ycenter'].value

        return (x_centroids, y_centroids)

    def synthetic_image_maker(x_centroids, y_centroids, fwhm):
        #Construct synthetic images from centroid/flux data.
        synthetic_image = np.zeros((1024, 1024))
        sigma = fwhm / 2.355
        for i in range(len(x_centroids)):
            #Cut out little boxes around each source and add in Gaussian representations. This saves time.
            int_centroid_x = int(np.round(x_centroids[i]))
            int_centroid_y = int(np.round(y_centroids[i]))
            y_cut, x_cut = np.mgrid[int_centroid_y - 10:int_centroid_y + 10,
                                    int_centroid_x - 10:int_centroid_x + 10]
            dist = np.sqrt((x_cut - x_centroids[i])**2 +
                           (y_cut - y_centroids[i])**2)
            #2D Gaussian profile: exp(-r^2 / (2 sigma^2))
            synthetic_image[y_cut, x_cut] += np.exp(-dist**2 / (2 * sigma**2))
        return (synthetic_image)

    def auto_correlation_seeing(im, cutout_w=15):

        plate_scale = 0.579  #arcsec/pix
        sigma_to_fwhm = 2.355

        #Set a row to NaNs, which will dominate the autocorrelation of Mimir images.
        im[513] = np.nan

        #Interpolate nans in the image, repeating until no nans remain.
        while sum(sum(np.isnan(im))) > 0:
            im = interpolate_replace_nans(im, kernel=Gaussian2DKernel(0.5))

        #Cut 80 pixels near top/bottom edges, which can dominate the fft if they have a "ski jump" feature.
        im = im[80:944, :]
        y_dim, x_dim = im.shape

        #Subtract off a simple estimate of the image background.
        im -= sigma_clipped_stats(im)[1]

        #Do auto correlation
        fft = signal.fftconvolve(im, im[::-1, ::-1], mode='same')

        #Do a cutout around the center of the fft.
        cutout = fft[int(y_dim / 2) - cutout_w:int(y_dim / 2) + cutout_w,
                     int(x_dim / 2) - cutout_w:int(x_dim / 2) + cutout_w]

        #Set the midplane of the cutout to nans and interpolate.
        cutout[cutout_w] = np.nan

        while sum(sum(np.isnan(cutout))) > 0:
            cutout = interpolate_replace_nans(cutout, Gaussian2DKernel(0.25))

        #Subtract off "background"
        cutout -= np.nanmedian(cutout)

        #Fit a 2D Gaussian to the cutout
        #Assume a seeing of 2".7, the average value measured for PINES.
        g_init = models.Gaussian2D(
            amplitude=np.nanmax(cutout),
            x_mean=cutout_w,
            y_mean=cutout_w,
            x_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2),
            y_stddev=2.7 / plate_scale / sigma_to_fwhm * np.sqrt(2))
        g_init.x_mean.fixed = True
        g_init.y_mean.fixed = True
        #Set limits on the fitted gaussians between 1".6 and 7".0
        #Factor of sqrt(2) corrects for autocorrelation of 2 gaussians.
        g_init.x_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.min = 1.6 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.x_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)
        g_init.y_stddev.max = 7 / plate_scale / sigma_to_fwhm * np.sqrt(2)

        fit_g = fitting.LevMarLSQFitter()
        y, x = np.mgrid[:int(2 * cutout_w), :int(2 * cutout_w)]
        g = fit_g(g_init, x, y, cutout)

        #Convert to fwhm in arcsec.
        seeing_fwhm_as = g.y_stddev.value / np.sqrt(
            2) * sigma_to_fwhm * plate_scale

        return seeing_fwhm_as

    plt.ion()

    target = target.replace(' ', '')

    #By default, point to today's date directory.
    ut_date = datetime.datetime.utcnow()
    if ut_date.month < 10:
        month_string = '0' + str(ut_date.month)
    else:
        month_string = str(ut_date.month)

    if ut_date.day < 10:
        day_string = '0' + str(ut_date.day)
    else:
        day_string = str(ut_date.day)

    date_string = str(ut_date.year) + month_string + day_string

    #Copy the test.fits file to the master_images directory in PINES scripts.
    #test_path =  '/mimir/data/obs72/'+date_string+'/test.fits'
    test_path = '/Users/obs72/Desktop/PINES_scripts/test_image/test.fits'
    target_path = '/Users/obs72/Desktop/PINES_scripts/master_images/' + target + '_master.fits'
    shutil.copyfile(test_path, target_path)

    file_path = '/Users/obs72/Desktop/PINES_scripts/master_images/' + target + '_master.fits'
    calibration_path = '/Users/obs72/Desktop/PINES_scripts/Calibrations/'

    #Open the image and calibration files.
    header = fits.open(file_path)[0].header
    exptime = header['EXPTIME']
    filter_name = header['FILTNME2']  #avoid shadowing the builtin 'filter'
    image = fits.open(file_path)[0].data[0:1024, :].astype('float')
    dark = fits.open(calibration_path + 'Darks/master_dark_' + str(exptime) +
                     '.fits')[0].data
    flat = fits.open(calibration_path + 'Flats/master_flat_' + filter_name +
                     '.fits')[0].data
    bpm_path = calibration_path + 'Bad_pixel_masks/bpm.fits'
    bpm = fits.open(bpm_path)[0].data

    #Reduce image.
    image = (image - dark) / flat

    #Interpolate over bad pixels
    image[np.where(bpm)] = np.nan
    kernel = Gaussian2DKernel(x_stddev=1)
    image = interpolate_replace_nans(image, kernel)

    if seeing == '':
        seeing = auto_correlation_seeing(image)
    else:
        seeing = float(seeing)

    print('Using seeing FWHM = {:1.1f}" to detect sources'.format(seeing))
    #Convert the seeing FWHM from arcsec to pixels (0.579"/pix). An older
    #version also multiplied by 2.355, which PSM identified as a bug.
    daostarfinder_fwhm = seeing / 0.579

    #Do a simple 2d background model.
    box_size = 32
    sigma_clip = SigmaClip(sigma=3.)
    bkg_estimator = MedianBackground()
    bkg = Background2D(image, (box_size, box_size),
                       filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)
    image = image - bkg.background

    avg, med, std = sigma_clipped_stats(image)

    #Save reduced image to test_image for inspection
    # hdu_reduced = fits.PrimaryHDU(image)
    # hdu_reduced.writeto('/Users/obs72/Desktop/PINES_scripts/test_image/master_reduced.fits',overwrite=True)

    #Find sources in the image.
    (x_centroids, y_centroids) = mimir_source_finder(image,
                                                     sigma_above_bg=5,
                                                     fwhm=daostarfinder_fwhm)

    #Plot the field with detected sources.
    plt.figure(figsize=(9, 9))
    norm = ImageNormalize(image, interval=ZScaleInterval())
    plt.imshow(image, origin='lower', norm=norm)
    plt.plot(x_centroids, y_centroids, 'rx')
    for i in range(len(x_centroids)):
        plt.text(x_centroids[i] + 8,
                 y_centroids[i] + 8,
                 str(i),
                 color='r',
                 fontsize=14)
    plt.title(
        'Inspect to make sure stars were found!\nO for magnification tool, R to reset view'
    )
    plt.tight_layout()
    plt.show()

    print('')
    print('')
    print('')

    #Prompt the user to remove any false detections.
    ids = input(
        'Enter ids of sources to be removed separated by commas (i.e., 4,18,22). If none to remove, hit enter. To break, ctrl + D. '
    )
    if ids != '':
        ids_to_eliminate = [int(i) for i in ids.split(',')]
        ids = [
            int(i) for i in np.linspace(0,
                                        len(x_centroids) - 1, len(x_centroids))
        ]
        ids_to_keep = []
        for i in range(len(ids)):
            if ids[i] not in ids_to_eliminate:
                ids_to_keep.append(ids[i])
    else:
        ids_to_keep = [
            int(i) for i in np.linspace(0,
                                        len(x_centroids) - 1, len(x_centroids))
        ]
    plt.clf()
    plt.imshow(image, origin='lower', vmin=med, vmax=med + 5 * std)
    plt.plot(x_centroids[ids_to_keep], y_centroids[ids_to_keep], 'rx')
    for i in range(len(x_centroids[ids_to_keep])):
        plt.text(x_centroids[ids_to_keep][i] + 8,
                 y_centroids[ids_to_keep][i] + 8,
                 str(i),
                 color='r')

    #Create the synthetic image using the accepted sources.
    synthetic_image = synthetic_image_maker(x_centroids[ids_to_keep],
                                            y_centroids[ids_to_keep], 8)
    plt.figure(figsize=(9, 7))
    plt.imshow(synthetic_image, origin='lower')
    plt.title('Synthetic image')
    plt.show()

    print('')
    print('')
    print('')
    #Now write to a master synthetic image.fits file.
    hdu = fits.PrimaryHDU(synthetic_image, header=header)
    if os.path.exists('master_images/' + target + '_master_synthetic.fits'):
        ans = input(
            'Master image already exists for target, type y to overwrite. ')
        if ans == 'y':
            os.remove('master_images/' + target + '_master_synthetic.fits')
            hdu.writeto('master_images/' + target + '_master_synthetic.fits')
            print('Writing master synthetic image to master_images/' + target +
                  '_master_synthetic.fits')
        else:
            print('New master synthetic image not saved.')
    else:
        hdu.writeto('master_images/' + target + '_master_synthetic.fits')
        print('Writing master synthetic image to master_images/' + target +
              '_master_synthetic.fits')

        #Open list of master images and append new one.
        master_image_list = '/Users/obs72/Desktop/PINES_scripts/input_file.txt'
        file_object = open(master_image_list, 'a')
        append_str = '/Users/obs72/Desktop/PINES_scripts/master_images/' + target + '_master.fits, 2MASS ' + target.split(
            '2MASS')[1]
        file_object.write('\n')
        file_object.write(append_str)
        file_object.close()
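The cutout trick in synthetic_image_maker generalizes to a short standalone routine. A sketch assuming, as the 20-pixel edge cut above guarantees, that every centroid lies at least half_w pixels from the frame edges:

import numpy as np

def synthetic_image(x_cen, y_cen, fwhm, shape=(1024, 1024), half_w=10):
    img = np.zeros(shape)
    sigma = fwhm / 2.355
    for xc, yc in zip(x_cen, y_cen):
        xi, yi = int(round(xc)), int(round(yc))
        #evaluate the Gaussian only in a small box around each source
        y_cut, x_cut = np.mgrid[yi - half_w:yi + half_w, xi - half_w:xi + half_w]
        dist2 = (x_cut - xc)**2 + (y_cut - yc)**2
        img[y_cut, x_cut] += np.exp(-dist2 / (2 * sigma**2))
    return img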
Example #15
def rgbmovie(cube, zmin, zmax, logscale=False, minref=0., maxref=0.45, ksz=1, group=2, prefix='frame', hdr=0, duration=0.5):

   sz=np.shape(cube)
   rgb=np.zeros([sz[1],sz[2],3])
   k=0

   images=[]

   for i in range(zmin, zmax):

      tempmap=cube[i-1-group//2:i-1+group//2,:,:].mean(axis=0)
      if(group==0):
         tempmap=cube[i-1,:,:]
      if(ksz > 1):
         inmap=convolve_fft(tempmap, Gaussian2DKernel(ksz))
      else:
         inmap=tempmap
      if(logscale):
         inmap=np.log10(np.copy(inmap))
      inmap[np.isinf(inmap).nonzero()]=minref 
      inmap[(inmap < minref).nonzero()]=minref
      inmap[(inmap > maxref).nonzero()]=maxref
      red=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))
 
      tempmap=cube[i-group//2:i+group//2,:,:].sum(axis=0)/float(group+1)
      if(group==0):
         tempmap=cube[i,:,:]
      if(ksz > 1):
         inmap=convolve_fft(tempmap, Gaussian2DKernel(ksz))
      else:
         inmap=tempmap
      if(logscale):
         inmap=np.log10(np.copy(inmap))
      inmap[np.isinf(inmap).nonzero()]=minref 
      inmap[(inmap < minref).nonzero()]=minref
      inmap[(inmap > maxref).nonzero()]=maxref
      green=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))

      tempmap=cube[i+1-group//2:i+1+group//2,:,:].sum(axis=0)/float(group+1)
      if(group==0):
         tempmap=cube[i+1,:,:]
      if(ksz > 1):
         inmap=convolve_fft(tempmap, Gaussian2DKernel(ksz))
      else:
         inmap=tempmap
      if(logscale):
         inmap=np.log10(np.copy(inmap))
      inmap[np.isinf(inmap).nonzero()]=minref 
      inmap[(inmap < minref).nonzero()]=minref
      inmap[(inmap > maxref).nonzero()]=maxref
      blue=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))

      rgb[:,:,0]=red
      rgb[:,:,1]=green
      rgb[:,:,2]=blue

      fig = plt.figure(figsize=(1.5, 3.0), dpi=300)
      plt.rc('font', size=SMALLER_SIZE)
      if(hdr):
         ax1=plt.subplot(1,1,1, projection=WCS(hdr)) 
         im=ax1.imshow(rgb, origin='lower', interpolation='none')
         ax1.coords.grid(color='white')
         ax1.coords['glon'].set_axislabel('Galactic Longitude')
         ax1.coords['glat'].set_axislabel('Galactic Latitude')
      else:
         ax1=plt.subplot(1,1,1)
         im=ax1.imshow(rgb, origin='lower', interpolation='none')
      ax1.set_title('Projected HI')
      #plt.show()  
      plt.savefig(prefix+'_'+str(k)+'.png', bbox_inches='tight')
      plt.close() 
 
      images.append(imageio.imread(prefix+'_'+str(k)+'.png'))

      k+=1

   imageio.mimsave(prefix+'.gif', images, duration=duration)   
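Each color plane in rgbmovie goes through the same clip-and-rescale; a compact equivalent of those repeated blocks:

import numpy as np

def to_channel(inmap, minref, maxref):
    out = np.clip(inmap, minref, maxref)                #enforce the reference range
    return (out - out.min()) / (out.max() - out.min())  #rescale to [0, 1]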
Example #16
def plot_surface_brightness(timestep,
                            unit_values,
                            run_dirs,
                            filename,
                            redshift=0.1,
                            beamsize=5 * u.arcsec,
                            showbeam=True,
                            xlim=(-200, 200),
                            ylim=(-750, 750),   #actually z in arcsec
                            xticks=None,
                            pixel_size=1.8 * u.arcsec,
                            beam_x=0.8,
                            beam_y=0.8,
                            png=True,
                            contours=True,
                            convolve=True,
                            half_plane=True,
                            vmin=-3.0,
                            vmax=2.0,
                            no_labels=False,
                            with_hist=False,
                            ):
    from plutokore import radio
    from numba import jit
    from astropy.convolution import convolve as astropy_convolve, Gaussian2DKernel  #aliased so the 'convolve' flag is not shadowed

    @jit(nopython=True)
    def raytrace_surface_brightness(r, theta, x, y, z, raytraced_values, original_values):
        phi = 0
        rmax = np.max(r)
        thetamax = np.max(theta)
        x_half_step = (x[1] - x[0]) * 0.5
        pi2_recip = (1 / (2 * np.pi))

        visited = np.zeros(original_values.shape)
        for x_index in range(len(x)):
            for z_index in range(len(z)):
                visited[:,:] = 0
                for y_index in range(len(y)):
                    # Calculate the coordinates of this point
                    ri = np.sqrt(x[x_index] **2 + y[y_index] ** 2 + z[z_index] ** 2)
                    if ri == 0:
                        continue
                    if ri > rmax:
                        continue
                    thetai = np.arccos(z[z_index] / ri)
                    if thetai > thetamax:
                        continue
                    phii = 0 # Don't care about φi!!

                    chord_length = np.abs(np.arctan2(y[y_index], x[x_index] + x_half_step) - np.arctan2(y[y_index], x[x_index] - x_half_step))

                    # Now find index in r and θ arrays corresponding to this point
                    r_index = np.argmax(r>ri)
                    theta_index = np.argmax(theta>thetai)
                    # Only add this if we have not already visited this cell (twice)
                    if visited[r_index, theta_index] <= 1:
                        raytraced_values[x_index, z_index] += original_values[r_index, theta_index] * chord_length * pi2_recip
                        visited[r_index, theta_index] += 1
        #return raytraced_values
        return

    fig, ax = newfig(1, 1.8)
    #fig, ax = figsize(10,50)

    # calculate beam radius
    sigma_beam = (beamsize / 2.355)

    # calculate kpc per arcsec
    kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(redshift).to(u.kpc / u.arcsec)

    # load timestep data file
    d = pp.pload(timestep, w_dir=run_dirs)

    X1, X2 = pk.simulations.sphericaltocartesian(d)
    X1 = X1 * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value
    X2 = X2 * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value

    l = radio.get_luminosity(d, unit_values, redshift, beamsize)
    f = radio.get_flux_density(l, redshift).to(u.Jy).value
    #sb = radio.get_surface_brightness(f, d, unit_values, redshift, beamsize).to(u.Jy)

    xmax = (((xlim[1] * u.arcsec + pixel_size) * kpc_per_arcsec) / unit_values.length).si
    xstep = (pixel_size * kpc_per_arcsec / unit_values.length).si
    zmax = (((ylim[1] * u.arcsec + pixel_size) * kpc_per_arcsec) / unit_values.length).si
    zstep = (pixel_size * kpc_per_arcsec / unit_values.length).si
    ymax = max(xmax, zmax)
    ystep = min(xstep, zstep)
    ystep = 0.5

    if half_plane:
        x = np.arange(-xmax, xmax, xstep)
        z = np.arange(-zmax, zmax, zstep)
    else:
        x = np.arange(0, xmax, xstep)
        z = np.arange(0, zmax, zstep)
    y = np.arange(-ymax, ymax, ystep)
    raytraced_flux = np.zeros((x.shape[0], z.shape[0]))

    # print(f'xlim in arcsec is {xlim[1]}, xlim in code units is {xlim[1] * u.arcsec * kpc_per_arcsec / unit_values.length}')
    # print(f'zlim in arcsec is {ylim[1]}, zlim in code units is {ylim[1] * u.arcsec * kpc_per_arcsec / unit_values.length}')
    # print(f'xmax is {xmax}, ymax is {ymax}, zmax is {zmax}')
    # print(f'x shape is {x.shape}; y shape is {y.shape}; z shape is {z.shape}')

    raytrace_surface_brightness(
        r=d.x1,
        theta=d.x2,
        x=x,
        y=y,
        z=z,
        original_values=f,
        raytraced_values=raytraced_flux
    )

    raytraced_flux = raytraced_flux * u.Jy

    # beam information
    sigma_beam_arcsec = beamsize / 2.355
    area_beam_kpc2 = (np.pi * (sigma_beam_arcsec * kpc_per_arcsec)
                      **2).to(u.kpc**2)
    beams_per_cell = (((pixel_size * kpc_per_arcsec) ** 2) / area_beam_kpc2).si
    #beams_per_cell = (area_beam_kpc2 / ((pixel_size * kpc_per_arcsec) ** 2)).si

    # radio_cell_areas = np.full(raytraced_flux.shape, xstep * zstep) * (unit_values.length ** 2)

    # n beams per cell
    #n_beams_per_cell = (radio_cell_areas / area_beam_kpc2).si

    raytraced_flux /= beams_per_cell

    #beam sigma in map pixels (each raytraced cell is pixel_size across)
    stddev = (sigma_beam_arcsec / pixel_size).si.value
    beam_kernel = Gaussian2DKernel(stddev)
    if convolve:
        flux = astropy_convolve(raytraced_flux.to(u.Jy), beam_kernel, boundary='extend') * u.Jy
    else:
        flux = raytraced_flux.to(u.Jy)
    #flux = radio.convolve_surface_brightness(raytraced_flux, unit_values, redshift, beamsize)
    #flux = raytraced_flux
    
    #return (x, z, flux) # x_coords, z_coords, surfb = plot_surface_brightness(...)

    X1 = x * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value
    X2 = z * (unit_values.length / kpc_per_arcsec).to(u.arcsec).value
    
    return (X1, X2, flux) # x_coords, z_coords, surfb = plot_surface_brightness(...)

    # plotting code below is unreachable after the early return; kept for reference
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    contour_color = 'k'
    contour_linewidth = 0.33
    #contour_levels = [-3, -1, 1, 2] 
    contour_levels = [-2, -1, 0, 1, 2] # Contours start at 10 μJy

    #with plt.style.context('flux-plot.mplstyle'): keep
    im = ax.pcolormesh(
            X1,
             X2,
            np.log10(flux.to(u.mJy).value).T,
            shading='flat',
            vmin=vmin,
            vmax=vmax)
    im.set_rasterized(True)
    if contours:
            ax.contour(X1, X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

    im = ax.pcolormesh(
            -X1,
            X2,
            np.log10(flux.to(u.mJy).value).T,
            shading='flat',
            vmin=vmin,
            vmax=vmax)
    im.set_rasterized(True)
    if contours:
            ax.contour(-X1, X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

    if not half_plane:
        im = ax.pcolormesh(
                X1,
                -X2,
                np.log10(flux.to(u.mJy).value).T,
                shading='flat',
                vmin=vmin,
                vmax=vmax)
        im.set_rasterized(True)
        if contours:
                ax.contour(X1, -X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

        im = ax.pcolormesh(
                -X1,
                -X2,
                np.log10(flux.to(u.mJy).value).T,
                shading='flat',
                vmin=vmin,
                vmax=vmax)
        im.set_rasterized(True)
        if contours:
                ax.contour(-X1, -X2, np.log10(flux.to(u.mJy).value).T, contour_levels, linewidths=contour_linewidth, colors=contour_color)

    if with_hist:
        div = make_axes_locatable(ax)   #from mpl_toolkits.axes_grid1 import make_axes_locatable
        ax_hist = div.append_axes('right', '30%', pad=0.0)
        s = np.sum(flux.to(u.mJy).value, axis=0)
        ax_hist.plot(np.concatenate([s, s]), np.concatenate([X2, -X2]))
        ax_hist.set_yticklabels([])

    if not no_labels:
        (ca, div, cax) = create_colorbar(
            im, ax, fig, position='right', padding=0.5)
        ca.set_label(r'$\log_{10}\mathrm{mJy / beam}$')

    circ = plt.Circle(
        (xlim[1] * beam_x, ylim[0] * beam_y),
        color='w',
        fill=True,
        radius=sigma_beam.to(u.arcsec).value,
        alpha=0.7)
    #circ.set_rasterized(True)

    if showbeam:
        ax.add_artist(circ)

    # reset limits
    if not no_labels:
        ax.set_xlabel('X ($\'\'$)')
        ax.set_ylabel('Y ($\'\'$)')
    ax.set_aspect('equal')

    if xticks is not None:
        ax.set_xticks(xticks)

    if no_labels:
        ax.set_position([0, 0, 1, 1])
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.axis('off')

    ax.set_aspect('equal')

    if no_labels:
        savefig(filename, fig, png=png, kwargs={
            'bbox_inches': 'tight',
            'pad_inches': 0}
                )
    else:
        savefig(filename, fig, png=png, dpi = 300)
    plt.show()
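A sketch of the per-beam normalization used above, mirroring the beams_per_cell computation (the snippet's own commented-out line shows the inverse ratio was also considered, so treat the direction as an assumption):

import numpy as np
import astropy.units as u

def per_beam(flux_per_pixel, pixel_size=1.8 * u.arcsec, beam_fwhm=5 * u.arcsec):
    sigma = beam_fwhm / 2.355
    beam_area = np.pi * sigma**2                 #Gaussian-beam area, as in the snippet
    beams_per_pixel = ((pixel_size**2) / beam_area).si
    return flux_per_pixel / beams_per_pixel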
Example #17
def rgbcube(cube, zmin, zmax, logscale=False, minref=0., maxref=0., ksz=1, EquiBins=True):

   sz=np.shape(cube)
   cube[np.isnan(cube).nonzero()]=0.
   #cube[(cube < noiselevel)]==noiselevel

   rgb=np.zeros([sz[1],sz[2],3])

   channels=zmax-zmin+1
   indexes=np.arange(zmin,zmax)
   pitch=int(channels/3.)

   meanI=cube[zmin:zmax].mean(axis=(1,2))
   cumsumI=np.cumsum(meanI)
   binwd=np.max(cumsumI)/3.

   # ------------------------------------------------------------------------------------
   firstb=np.max((cumsumI < binwd).nonzero())
   if (EquiBins):
      tempmap=cube[zmin:zmin+firstb-1,:,:].mean(axis=0)
   else:
      tempmap=cube[zmin:zmin+pitch-1,:,:].mean(axis=0)

   if(logscale):
      inmap=np.log10(np.copy(tempmap))
      inmap[np.isnan(inmap).nonzero()]=np.min(inmap[np.isfinite(inmap).nonzero()])
   else:
      if(ksz > 1):
         inmap=convolve_fft(tempmap, Gaussian2DKernel(ksz))
      else:
         inmap=tempmap
   
   if (minref==0.):
      minref=np.min(inmap[np.isfinite(inmap).nonzero()])
   if (maxref==0.):
      maxref=np.max(inmap[np.isfinite(inmap).nonzero()])

   inmap[np.isinf(inmap).nonzero()]=minref
   inmap[(inmap < minref).nonzero()]=minref
   inmap[(inmap > maxref).nonzero()]=maxref
   red=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))

   # ------------------------------------------------------------------------------------
   secondb=np.max((cumsumI < 2.*binwd).nonzero())
   if (EquiBins):
      tempmap=cube[zmin+firstb:zmin+secondb,:,:].mean(axis=0)
   else:
      tempmap=cube[zmin+pitch:zmin+2*pitch-1,:,:].mean(axis=0)

   if(logscale):
      inmap=np.log10(np.copy(tempmap))
      inmap[np.isnan(inmap).nonzero()]=np.min(inmap[np.isfinite(inmap).nonzero()])
   else:
      if(ksz > 1):
         inmap=convolve_fft(tempmap, Gaussian2DKernel(ksz))
      else:
         inmap=tempmap

   if (minref==0.):
      minref=np.min(inmap[np.isfinite(inmap).nonzero()])
   if (maxref==0.):
      maxref=np.max(inmap[np.isfinite(inmap).nonzero()])
   
   inmap[np.isinf(inmap).nonzero()]=minref
   inmap[(inmap < minref).nonzero()]=minref
   inmap[(inmap > maxref).nonzero()]=maxref
   green=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))

   # ------------------------------------------------------------------------------------ 
   if (EquiBins):
      tempmap=cube[zmin+secondb+1:zmax,:,:].mean(axis=0)
   else:
      tempmap=cube[zmin+2*pitch:zmax,:,:].mean(axis=0)

   if(logscale):
      inmap=np.log10(np.copy(tempmap))
      inmap[np.isnan(inmap).nonzero()]=np.min(inmap[np.isfinite(inmap).nonzero()])
   else:
      if(ksz > 1):
         inmap=convolve_fft(tempmap, Gaussian2DKernel(ksz))
      else:
         inmap=tempmap

   if (minref==0.):
      minref=np.min(inmap[np.isfinite(inmap).nonzero()])
   if (maxref==0.):
      maxref=np.max(inmap[np.isfinite(inmap).nonzero()])
      
   inmap[np.isinf(inmap).nonzero()]=minref
   inmap[(inmap < minref).nonzero()]=minref
   inmap[(inmap > maxref).nonzero()]=maxref 
   blue=(inmap-np.min(inmap))/(np.max(inmap)-np.min(inmap))

   rgb[:,:,0]=red
   rgb[:,:,1]=green
   rgb[:,:,2]=blue

   return rgb
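The EquiBins split reduces to two cumulative-sum lookups; a sketch of just that piece:

import numpy as np

def equi_bins(cube, zmin, zmax):
    mean_i = cube[zmin:zmax].mean(axis=(1, 2))                 #mean intensity per channel
    csum = np.cumsum(mean_i)
    binwd = csum[-1] / 3.0
    first = int(np.max(np.flatnonzero(csum < binwd)))          #last channel of the red bin
    second = int(np.max(np.flatnonzero(csum < 2.0 * binwd)))   #last channel of the green bin
    return first, second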
Example #18
def astropy_measures(detection: dict,
                     filter_kernel=None,
                     detect_npixels=5,
                     deblend_npixels=200,
                     deblend_nlevels=32,
                     deblend_contrast=0.001) -> dict:
    # load PNG and convert to grayscale
    data = detection[GRAY]
    data = np.flipud(data)  # flip by Y axis (like in screen)

    # find the darkest nonzero pixel
    brightest = np.max(data)
    darkest = brightest

    rows = data.shape[0]
    cols = data.shape[1]
    for x in range(0, cols):
        for y in range(0, rows):
            v = data[y, x]
            if v:  # skip pixels with 0 bright
                darkest = min(darkest, v)

    # set the threshold 1/8 of the way from the darkest nonzero pixel to the brightest
    threshold = np.ones(data.shape) * ((brightest - darkest) / 8 + darkest)

    # parameters for detect_sources taken from the tutorial
    kernel = filter_kernel
    if kernel is None:
        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
    segm = detect_sources(data,
                          threshold,
                          npixels=detect_npixels,
                          filter_kernel=kernel)

    # break when not found any hit
    if segm is None:
        detection[ASTROPY_FOUND] = 0
        print(detection['id'])
        return detection
    detection[ASTROPY_FOUND] = segm.nlabels
    if segm.nlabels == 0:
        return detection

    # average bright of background
    sum_background = 0
    count_background = 0
    for x in range(0, cols):
        for y in range(0, rows):
            c = segm.data[y, x]
            v = data[y, x]
            if v and not c:  # skip pixels with 0 bright
                sum_background += v
                count_background += 1
    background_bright = sum_background / count_background if count_background else 0

    # deblend the detected objects from one another; parameters as in the tutorial except for
    # npixels=200, which had to be increased because long hits were being needlessly split into
    # pieces; with this value they stay whole, with no negative effect on clearly separate
    # hits in a single PNG
    # segm_deblend = deblend_sources(data, segm, npixels=deblend_npixels, filter_kernel=kernel, nlevels=deblend_nlevels, contrast=deblend_contrast)

    # analyze the detected objects (area, ellipticity, etc.)
    cat = source_properties(data, segm)

    # write results to disk
    nth = 0
    #
    # image = np.arange(16.).reshape(4, 4)
    # segm = SegmentationImage([[1, 1, 0, 0],
    #                           [1, 0, 0, 2],
    #                           [0, 0, 2, 2],
    #                           [0, 2, 2, 0]])
    # cat = source_properties(image, segm)
    # obj = cat[0]
    #
    # cat = source_properties(data, segm_deblend)
    # obj = cat[0]

    for obj in cat:
        nth += 1
        brightest_obj = 0
        bright_obj_sum = 0
        brightest_obj_sum = 0
        brightest_obj_count = 0

        for x in range(0, int(obj.xmax.value - obj.xmin.value + 1)):
            for y in range(0, int(obj.ymax.value - obj.ymin.value + 1)):
                c = obj.data_cutout[y, x]
                v = data[int(obj.ymin.value + y), int(obj.xmin.value + x)]
                if c:
                    brightest_obj = max(v, brightest_obj)
                    brightest_obj_sum += v
                    brightest_obj_count += 1
                    bright_obj_sum += v - background_bright

        brightest_obj_avg = brightest_obj_sum / brightest_obj_count

        try:
            s = segm[0]
            another_props = regionprops(label(s.data))[0]
        except Exception:  #region properties could not be computed for this object
            continue

        append_to_array(detection, ASTROPY_ELLIPTICITY,
                        float(obj.ellipticity.value))
        append_to_array(detection, ASTROPY_ELONGATION,
                        float(obj.elongation.value))
        append_to_array(detection, ASTROPY_CONVEX_AREA,
                        int(another_props.convex_area))
        append_to_array(detection, ASTROPY_SOLIDITY,
                        float(another_props.solidity))
        append_to_array(detection, ASTROPY_ORIENTATION,
                        float(obj.orientation.value))
        append_to_array(detection, ASTROPY_MAJOR_AXIS_LENGTH,
                        float(another_props.major_axis_length))
        append_to_array(detection, ASTROPY_MINOR_AXIS_LENGTH,
                        float(another_props.minor_axis_length))
        #
        # # save an image marked up with the detect_sources segmentation
        # pngfn = join(pngdir, fn.replace('.png', '-segm.png'))
        # plt.imsave(pngfn, segm_deblend, origin='lower', cmap=segm_deblend.cmap(random_state=12345))
        #
        # # append to CSV file
        # csv = join(csvdir, '%s-segm.csv' % str(device_id))
        # with open(csv, "a") as fcsv:
        #     fcsv.write('%s\n' % '\t'.join(nvalues))
    return detection
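The darkest/brightest scan above can be vectorized; a sketch of the same 1/8 threshold rule without the per-pixel Python loops:

import numpy as np

def eighth_threshold(data):
    nonzero = data[data > 0]  #ignore zero-valued (background) pixels
    darkest, brightest = nonzero.min(), data.max()
    return np.full(data.shape, (brightest - darkest) / 8.0 + darkest)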
Example #19
def convolve_reproj(inp,
                    norm,
                    fwhm,
                    data,
                    wcs_hires,
                    wcs_lowres,
                    offset=None,
                    pixscale=0.03,
                    bounds=np.array([[10, 23], [37, 52]]),
                    inneroffset=None,
                    angle=None):
    """Say what this does

        Parameters
        ----------
        wcs_hires : WCS
            WCS of HST image
        wcs_lowres : WCS
            WCS of KCWI image
        norm : float,optional
            normalization factor; units??
        fwhm : float or 2-element array,optional
            seeing FWHM; if 2 elements, use asymmetric Gaussian; what are units??
            looks like should be arcsec??


        Returns
        -------


        """

    if isinstance(fwhm, float):
        numseepix = fwhm / pixscale
        kern = Gaussian2DKernel(numseepix /
                                2.355)  # FWHM = sigma * 2 sqrt(2.*ln(2))
    else:
        try:
            fwhmx, fwhmy = fwhm
        except:
            import pdb
            pdb.set_trace()
        numseepix_x = fwhmx / pixscale
        numseepix_y = fwhmy / pixscale
        if angle is None:
            angle = 0
        try:
            kern = Gaussian2DKernel(numseepix_x / 2.355,
                                    numseepix_y / 2.355,
                                    theta=angle)
        except:
            import pdb
            pdb.set_trace()
    if (offset is None) & (inneroffset is None):
        convdat = norm * convolve_fft(inp, kern, boundary='wrap')
    elif (inneroffset is None):
        convdat = norm * convolve_fft(inp, kern, boundary='wrap') + offset
    elif (offset is None):
        convdat = norm * convolve_fft(inp + inneroffset, kern, boundary='wrap')
    else:
        convdat = norm * convolve_fft(inp + inneroffset, kern,
                                      boundary='wrap') + offset
    #convdat = norm * convolve(inp, kern, boundary='extend')
    try:
        inpreproj, inpfootprint = \
            reproject_interp((convdat, wcs_hires), wcs_lowres,
                         order='nearest-neighbor',
                         shape_out=np.shape(data))
    except:
        import pdb
        pdb.set_trace()
    if offset is not None:
        #NB: when offset is not None it has already been added to convdat
        #above, so it is applied a second time here
        inpreproj += offset
    if bounds is not None:
        return inpreproj[bounds[1, 0]:bounds[1, 1], bounds[0, 0]:bounds[0, 1]]
    else:
        return inpreproj
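Hypothetical usage, with hst_model, kcwi_data, wcs_hst, and wcs_kcwi standing in for real inputs; per the pixscale convention, fwhm is in arcsec for the default 0.03 arcsec/pix scale:

model_on_kcwi = convolve_reproj(hst_model,
                                norm=1.0,
                                fwhm=1.2,
                                data=kcwi_data,
                                wcs_hires=wcs_hst,
                                wcs_lowres=wcs_kcwi)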
Example #20
    data -= numpy.fromfile(args.ref, dtype=args.dtype, count=-1, sep='')
size = data.size
width = int(math.sqrt(size))
print("Size %dx%d, lambda %g" % (width, width, width / args.theta))
print("Min %g, max %g, mean %g, mean square %g" % (numpy.min(data), numpy.max(data),
                                                   numpy.mean(data), numpy.mean(data**2)))
image = data.reshape((width, width))

# Detect sources, if requested
if args.threshold > 0:

    from astropy.convolution import Gaussian2DKernel, Box2DKernel
    from astropy.stats import gaussian_fwhm_to_sigma
    import astropy.units as u
    from photutils import segmentation
    kernel = Gaussian2DKernel(args.fwhm * gaussian_fwhm_to_sigma,
                              x_size=int(1.5*args.fwhm), y_size=int(1.5*args.fwhm))
    kernel.normalize()
    segments = segmentation.detect_sources(image, args.threshold, npixels=args.npixels, filter_kernel=kernel)
    print("Have %d segments:" % (segments.nlabels))
    props = segmentation.source_properties(image, segments, filter_kernel=kernel)
    for segment in props:
        print("l=%+.6f m=%+.6f intensity=%.f" %
              (segment.xcentroid.value / width * args.theta - args.theta/2,
               segment.ycentroid.value / width * args.theta - args.theta/2,
               segment.max_value))

# Visualise
import matplotlib
matplotlib.use(args.backend)

project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
Example #21
    def vue_spatial_convolution(self, *args):
        """
        Use astropy convolution machinery to smooth the spatial dimensions of
        the data cube.
        """

        size = float(self.stddev)

        label = f"Smoothed {self._selected_data.label} spatial stddev {size}"

        if label in self.data_collection:
            # immediately cancel before smoothing
            snackbar_message = SnackbarMessage(
                "Data with selected stddev already exists, canceling operation.",
                color="error",
                sender=self)
            self.hub.broadcast(snackbar_message)

            return

        # Get information from the flux component
        attribute = self._selected_data.main_components[0]

        cube = self._selected_data.get_object(cls=Spectrum1D,
                                              attribute=attribute,
                                              statistic=None)
        flux_unit = cube.flux.unit

        # Extend the 2D kernel to have a length 1 spectral dimension, so that
        # we can do "3d" convolution to the whole cube
        kernel = np.expand_dims(Gaussian2DKernel(size), 2)

        # TODO: in vuetify >2.3, timeout should be set to -1 to keep open
        #  indefinitely
        snackbar_message = SnackbarMessage(
            "Smoothing spatial slices of cube...",
            loading=True,
            timeout=0,
            sender=self)
        self.hub.broadcast(snackbar_message)

        convolved_data = convolve(cube, kernel)

        # Create a new cube with the old metadata. Note that astropy
        # convolution generates values for masked (NaN) data.
        newcube = Spectrum1D(flux=convolved_data * flux_unit, wcs=cube.wcs)

        # add data to the collection
        self.app.add_data(newcube, label)
        if self.selected_viewer != 'None':
            # replace the contents in the selected viewer with the results from this plugin
            self.app.add_data_to_viewer(self.viewer_to_id.get(
                self.selected_viewer),
                                        label,
                                        clear_other_data=True)

        snackbar_message = SnackbarMessage(
            f"Data set '{self._selected_data.label}' smoothed successfully.",
            color="success",
            sender=self)
        self.hub.broadcast(snackbar_message)
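The "3d" trick above works on any (ny, nx, nspec) array: give the 2D kernel a length-1 spectral axis and astropy smooths every slice identically. A toy sketch with random data:

import numpy as np
from astropy.convolution import Gaussian2DKernel, convolve

cube = np.random.rand(64, 64, 100)                   #(ny, nx, nwave) toy cube
kernel3d = np.expand_dims(Gaussian2DKernel(1.5), 2)  #shape (ky, kx, 1)
smoothed = convolve(cube, kernel3d)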
Example #22
    def amr_velocity_moments(self, data=None, weights=None,
                             weight_name=None, box=150.,
                             limXY=default_limXY, nXY=default_npoint,
                             sigfac=10, smear=False, spread=True, **kwargs):
        """Compute velocity moments up to sigma for amr gas
        """
        if not self._check_attrs(['level']):
            print("ERROR: missing some input info in class")
            return None

        if weight_name is not None:
            weights = self._select_weight(weight_name)
        if data is None:
            data = self.vel[:,2]

        # Getting the levels and scales
        list_levels = np.unique(self.level)
        if np.size(nXY) == 2:
            nX, nY = nXY[0], nXY[1]
        else:
            nX = nY = nXY
        scalex = (limXY[1] - limXY[0]) / (nX - 1)
        scaley = (limXY[3] - limXY[2]) / (nY - 1)

        inv_sigfac = 1./ sigfac
        # Deriving the missing info and the spread if needed
        if weights is None: weights = np.ones_like(self.mass)
        if spread:
            [x, y], [weightr], [datar, levelr] = spread_amr([self.pos[:,0],
                                                          self.pos[:,1]],
                                                          self.level,
                                                          list_val=[weights],
                                                          list_const = [data, self.level],
                                                          nsample=10, box=box,
                                                          stretch=[1.,1.])
        else:
            x, y, datar, weightr, levelr = self.pos[:,0], self.pos[:,1], \
                                           data, weights, self.level

        if smear:
            M = np.zeros((nY, nX), dtype=np.float32)
            V = np.zeros((nY, nX), dtype=np.float32)
            S = np.zeros((nY, nX), dtype=np.float32)

            for l in list_levels:
                cells = box / 2**(l)
                selgas = (levelr == l)
                X, Y, Mf, Vt, St = points_2vmaps(x[selgas], y[selgas],
                                                 datar[selgas],
                                                 weights=weightr[selgas],
                                                 limXY=limXY, nXY=nXY, **kwargs)
                Vf = Mf * Vt
                Muf = Mf * (Vt ** 2 + St ** 2)

                # Convolving each one in turn with a Gaussian kernel
                stdx = 2. * cells * gaussian_fwhm_to_sigma / scalex
                stdy = 2. * cells * gaussian_fwhm_to_sigma / scaley
                if (stdx > inv_sigfac) & (stdy > inv_sigfac) & smear:
                    kernel = Gaussian2DKernel(stdx, stdy, x_size=int(sigfac*stdx),
                                              y_size=int(sigfac*stdy))
                    Mf = convolve_fft(Mf, kernel, fft_pad=False, fill_value=0)
                    Vf = convolve_fft(Vf, kernel, fft_pad=False, fill_value=0)
                    Muf = convolve_fft(Muf, kernel, fft_pad=False, fill_value=0)

                M += Mf
                V += Vf
                S += Muf

            V /= M
            #use the accumulated second moment S (the sum of Mf*(Vt^2 + St^2))
            S = np.sqrt(S / M - V**2)

        else:
            X, Y, M, V, S = points_2vmaps(x, y, datar, weights=weightr,
                                          limXY=limXY, nXY=nXY)

        mydict = {"X": X, "Y": Y, "V": V, "M": M, "S": S}
        mydict["PAs"] = self.PAs
        mydict["inclinations"] = self.inclinations
        newmap = SnapMap(name="Velocity moments", input_dic=mydict)
        return newmap
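A sketch of the moment bookkeeping above: per-level mass, momentum, and second-moment maps combine as V = sum(M*V)/sum(M) and S = sqrt(sum(M*(V**2 + S**2))/sum(M) - V**2), which is what the accumulation loop computes level by level:

import numpy as np

def combine_moments(Mf_list, Vt_list, St_list):
    M = sum(Mf_list)
    V = sum(m * v for m, v in zip(Mf_list, Vt_list)) / M
    Mu = sum(m * (v**2 + s**2) for m, v, s in zip(Mf_list, Vt_list, St_list))
    S = np.sqrt(Mu / M - V**2)
    return M, V, S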
Example #23
def make_fitted_plot(self,
                     result,
                     filestart='fitted',
                     show=True,
                     ut=None,
                     smooth=False,
                     savepng=False):
    curdoc().theme = Theme(BOKEH_THEME_FILE)

    if smooth:
        kernel = Gaussian2DKernel(x_stddev=1)
        source = convolve(result['abundance'], kernel, boundary='wrap')
        packets = convolve(result['p_available'], kernel, boundary='wrap')
    else:
        source = result['abundance']
        packets = result['p_available']

    # Tools
    tools = ['save']

    local_time = (result['longitude'].value * 12 / np.pi + 12) % 24
    arg = np.argsort(local_time[:-1])
    source, packets = source[arg, :], packets[arg, :]

    # Distribution of available packets
    fig0 = bkp.figure(plot_width=WIDTH,
                      plot_height=HEIGHT,
                      title=f'{self.species}, {self.query}, Available Packets',
                      x_axis_label='Local Time (hr)',
                      y_axis_label='Latitude (deg)',
                      x_range=[0, 24],
                      y_range=[-90, 90],
                      tools=tools)
    fig0.title.text_font_size = FONTSIZE
    fig0.xaxis.axis_label_text_font_size = FONTSIZE
    fig0.yaxis.axis_label_text_font_size = FONTSIZE
    fig0.xaxis.major_label_text_font_size = NUMFONTSIZE
    fig0.yaxis.major_label_text_font_size = NUMFONTSIZE
    fig0.xaxis.ticker = FixedTicker(ticks=[0, 6, 12, 18, 24])
    fig0.yaxis.ticker = FixedTicker(ticks=[-90, -45, 0, 45, 90])

    fig0.image(image=[packets.transpose()],
               x=0,
               y=-90,
               dw=24,
               dh=180,
               palette='Spectral11')

    # Distribution of packets used in the final model
    fig1 = bkp.figure(plot_width=WIDTH,
                      plot_height=HEIGHT,
                      title=f'{self.species}, {self.query}, Packets Used',
                      x_axis_label='Local Time (hr)',
                      y_axis_label='Latitude (deg)',
                      x_range=[0, 24],
                      y_range=[-90, 90],
                      tools=tools)
    fig1.title.text_font_size = FONTSIZE
    fig1.xaxis.axis_label_text_font_size = FONTSIZE
    fig1.yaxis.axis_label_text_font_size = FONTSIZE
    fig1.xaxis.major_label_text_font_size = NUMFONTSIZE
    fig1.yaxis.major_label_text_font_size = NUMFONTSIZE
    fig1.xaxis.ticker = FixedTicker(ticks=[0, 6, 12, 18, 24])
    fig1.yaxis.ticker = FixedTicker(ticks=[-90, -45, 0, 45, 90])

    fig1.image(image=[source.transpose()],
               x=0,
               y=-90,
               dw=24,
               dh=180,
               palette='Spectral11')

    fig2 = bkp.figure(
        plot_width=WIDTH,
        plot_height=HEIGHT,
        title=f'{self.species}, {self.query}, Speed Distribution',
        x_axis_label='Speed (km/s)',
        y_axis_label='Relative Number',
        y_range=[0, 1.2],
        tools=tools)
    fig2.title.text_font_size = FONTSIZE
    fig2.xaxis.axis_label_text_font_size = FONTSIZE
    fig2.yaxis.axis_label_text_font_size = FONTSIZE
    fig2.xaxis.major_label_text_font_size = NUMFONTSIZE
    fig2.yaxis.major_label_text_font_size = NUMFONTSIZE

    fig2.line(x=result['velocity'][:-1],
              y=result['v_available'],
              legend_label='Packets Available',
              color='red')
    fig2.line(x=result['velocity'][:-1],
              y=result['vdist'],
              legend_label='Packets Used',
              color='blue')

    # Full orbit time series
    # Format the date correction
    self.data['utcstr'] = self.data['utc'].apply(lambda x: x.isoformat()[0:19])

    # Put the dataframe in a useable form
    self.data['lower'] = self.data.radiance - self.data.sigma
    self.data['upper'] = self.data.radiance + self.data.sigma
    self.data['lattandeg'] = self.data.lattan * 180 / np.pi

    m = self.data[self.data.alttan != self.data.alttan.max()].alttan.max()
    col = np.interp(self.data.alttan, np.linspace(0, m, 256),
                    np.arange(256)).astype(int)
    self.data['color'] = [Turbo256[c] for c in col]
    source = bkp.ColumnDataSource(self.data)

    # Tools
    tools = [
        'pan', 'box_zoom', 'wheel_zoom', 'xbox_select', 'hover', 'reset',
        'save'
    ]

    # tool tips
    tips = [('index', '$index'), ('UTC', '@utcstr'),
            ('Radiance', '@radiance{0.2f} kR'),
            ('LTtan', '@loctimetan{2.1f} hr'),
            ('Lattan', '@lattandeg{3.1f} deg'), ('Alttan', '@alttan{0.f} km')]

    # Make the radiance figure
    title_ = f'{self.species}, {self.query}'
    if ut is not None:
        title_ += f', UTC = {ut.isoformat()}'
    else:
        pass

    fig3 = bkp.figure(plot_width=WIDTH,
                      plot_height=HEIGHT,
                      x_axis_type='datetime',
                      title=title_,
                      x_axis_label='UTC',
                      y_axis_label='Radiance (kR)',
                      y_range=[0, self.data.radiance.max() * 1.5],
                      tools=tools,
                      active_drag="xbox_select")
    fig3.title.text_font_size = FONTSIZE
    fig3.xaxis.axis_label_text_font_size = FONTSIZE
    fig3.yaxis.axis_label_text_font_size = FONTSIZE
    fig3.xaxis.major_label_text_font_size = NUMFONTSIZE
    fig3.yaxis.major_label_text_font_size = NUMFONTSIZE

    # plot the data
    dplot = fig3.circle(x='utc',
                        y='radiance',
                        size=7,
                        color='black',
                        legend_label='Data',
                        hover_color='yellow',
                        source=source,
                        selection_color='orange')
    fig3.line(x='utc',
              y='radiance',
              color='black',
              legend_label='Data',
              source=source)
    fig3.xaxis.ticker = DatetimeTicker(num_minor_ticks=5)

    # Add error bars
    fig3.add_layout(
        Whisker(source=source, base='utc', upper='upper', lower='lower'))
    renderers = [dplot]

    # Plot the model
    col = color_generator()
    modplots, maskedplots = [], []
    for modkey, result in self.model_result.items():
        if result.fitted:
            c = next(col)
            fig3.line(x='utc',
                      y=modkey,
                      source=source,
                      legend_label=result.label,
                      color=c)

            maskkey = modkey.replace('model', 'mask')
            mask = self.data[maskkey].to_list()
            view = CDSView(source=source, filters=[BooleanFilter(mask)])
            modplots.append(
                fig3.circle(x='utc',
                            y=modkey,
                            size=7,
                            color=c,
                            source=source,
                            legend_label=result.label,
                            view=view))

            maskkey = modkey.replace('model', 'mask')
            mask = np.logical_not(self.data[maskkey]).to_list()
            view = CDSView(source=source, filters=[BooleanFilter(mask)])
            maskedplots.append(
                fig3.circle(x='utc',
                            y=modkey,
                            size=7,
                            source=source,
                            line_color=c,
                            fill_color='yellow',
                            view=view,
                            legend_label=result.label +
                            '(Data Point Not Used)'))
    # Collect the model renderers once, after the loop; extending inside the
    # loop duplicated earlier entries on every iteration.
    renderers.extend(modplots)
    renderers.extend(maskedplots)

    # Draw the UTC marker once rather than once per model.
    if ut is not None:
        yr = fig3.y_range
        fig3.line(x=[ut, ut], y=[0, 1e5], color='purple')
        fig3.y_range = yr

    datahover = HoverTool(tooltips=tips, renderers=renderers)
    fig3.add_tools(datahover)

    grid = gridplot([[fig3, fig2], [fig0, fig1]])

    # Save png version
    if savepng:
        export_png(grid, filename=filestart + '.png')
    else:
        pass

    bkp.output_file(filestart + '.html')
    bkp.save(grid)

    if show:
        bkp.show(grid)
    else:
        pass

    return grid
Example #24
0
    y = obdata['Y_IMAGE_08B']
    y = y.astype(int)  # cast pixel coordinate to int

    size = 100  # size of square
    newim = imdata[y[0] - size:y[0] + size, x[0] - size:x[0] + size]

    del imdata
    print(newim[size, size])

    imuppthresh = 25
    newim[newim > imuppthresh] = imuppthresh
    imlowthresh = 0
    newim[newim < imlowthresh] = imlowthresh

    ### Convolve Images ###
    kernel = Gaussian2DKernel(0.5)
    newim = convolve(newim, kernel, normalize_kernel=True)

    #    plt.subplot(4, 2, n)
    snap = plt.imshow(newim, animated=True)
    #    plt.plot(size, size, 'k+', markersize=10, mfc='none')
    plt.xticks([])
    plt.yticks([])
    #    plt.title(sem)

    snaps.append([snap])

    n += 1

#%%
ani = animation.ArtistAnimation(fig, snaps, interval=500, blit=True)
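
The fragment above relies on fig, snaps and n having been set up earlier in the (truncated) loop. A minimal self-contained sketch of the same ArtistAnimation pattern, with synthetic frames and a placeholder output name:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from astropy.convolution import Gaussian2DKernel, convolve

fig = plt.figure()
snaps = []
for i in range(8):
    frame = convolve(np.random.rand(50, 50), Gaussian2DKernel(0.5))
    snap = plt.imshow(frame, animated=True)
    snaps.append([snap])  # ArtistAnimation expects a list of artists per frame

ani = animation.ArtistAnimation(fig, snaps, interval=500, blit=True)
ani.save('cutout_animation.gif', writer='pillow')  # placeholder filename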
Example #25
0
def get_wcs(pattern):
    for filename in pattern:

        def choose_hdu(filename):
            # fits.info returns a list of tuples, one per HDU.
            finfo = fits.info(filename, output=False)
            finfo_list = [item for item in finfo if 'COMPRESSED_IMAGE' in item]
            if not finfo_list:
                return finfo[0][0]  # 0 if not compressed
            else:
                return finfo_list[0][0]  # 1 if compressed

        which_hdu = choose_hdu(filename)
        with fits.open(filename, 'update') as hdul:
            header = hdul[which_hdu].header
            key = header['OBJECT']
            if key == 'Bias':
                print('Invalid image')

            w = WCS(header)  # build the WCS from the header, not the file name
            print(key)
            print(w)

        image_data = fits.getdata(filename, ext=0)
        from photutils import detect_threshold
        threshold = detect_threshold(image_data, 2)

        from astropy.convolution import Gaussian2DKernel
        from astropy.stats import gaussian_fwhm_to_sigma
        from photutils import detect_sources
        sigma = 3.0 * gaussian_fwhm_to_sigma  # FWHM = 3.
        kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
        kernel.normalize()
        segm = detect_sources(image_data,
                              threshold,
                              npixels=5,
                              filter_kernel=kernel)

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.visualization import SqrtStretch
        from astropy.visualization.mpl_normalize import ImageNormalize
        norm = ImageNormalize(stretch=SqrtStretch())
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 12.5))
        ax1.imshow(image_data, origin='lower', cmap='Greys_r', norm=norm)
        ax1.set_title('Data')
        cmap = segm.cmap(random_state=12345)
        ax2.imshow(segm, origin='lower', cmap=cmap)
        ax2.set_title('Segmentation Image')
        plt.show()

        from astropy.coordinates import SkyCoord
        from astropy import units as u
        RA = header['RA']
        DEC = header['DEC']
        print(RA, DEC)
        c = SkyCoord(ra=RA * u.degree, dec=DEC * u.degree)
        # How do I obtain the RA and DEC of the catalog? See the sketch below.
        catalog = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)
        idx, d2d, d3d = c.match_to_catalog_sky(catalog)
        matches = catalog[idx]
        (matches.separation_3d(c) == d3d).all()
        dra = (matches.ra - c.ra).arcmin
        ddec = (matches.dec - c.dec).arcmin
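
The question in the comment above (where do the catalog ra and dec come from?) could be answered with, for example, a VizieR cone search. A minimal sketch assuming astroquery is installed; the Gaia DR2 table 'I/345/gaia2', its RA_ICRS/DE_ICRS columns, and the pointing are illustrative choices, not part of the original code:

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.vizier import Vizier

Vizier.ROW_LIMIT = -1  # do not truncate the result table
center = SkyCoord(ra=150.0 * u.deg, dec=2.0 * u.deg)  # placeholder pointing
result = Vizier.query_region(center, radius=10 * u.arcmin,
                             catalog='I/345/gaia2')
tab = result[0]
ra = np.array(tab['RA_ICRS'])   # catalog RA in degrees
dec = np.array(tab['DE_ICRS'])  # catalog Dec in degrees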
Example #26
0
def convolution_gauss2d(img, std=1.):
    kernel = Gaussian2DKernel(x_stddev=std)
    out = convolve(img, kernel)
    return out
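
A minimal usage sketch, assuming the helper above and its astropy imports are in scope (the random image is a stand-in):

import numpy as np

img = np.random.default_rng(0).normal(size=(64, 64))
smoothed = convolution_gauss2d(img, std=2.0)  # Gaussian sigma of 2 pixels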
Example #27
0
def make_nooverlap_nvalue_grid_plots_smallgrid(slope_list, density_list,
                                               plottype):

    gs = gridspec.GridSpec(2, 2)
    gs.update(left=0.1,
              right=0.9,
              bottom=0.1,
              top=0.9,
              wspace=0.02,
              hspace=0.02)

    fig = plt.figure(figsize=(9, 5.5))
    ax_1to2 = fig.add_subplot(gs[0, 0])
    ax_2to3 = fig.add_subplot(gs[0, 1])
    ax_3to4 = fig.add_subplot(gs[1, 0])
    ax_4to5 = fig.add_subplot(gs[1, 1])

    all_axes = [ax_1to2, ax_2to3, ax_3to4, ax_4to5]
    diam_bins = ['1to2', '2to3', '3to4', '4to5']
    color_list = ['#67001f', '#377eb8', '#1b9e77', '#984ea3']

    ax_3to4.set_xlabel(r'$\mathrm{Slope}$', fontsize=14)
    ax_3to4.xaxis.set_label_coords(1.02, -0.08)

    ax_3to4.set_ylabel(r'$\mathrm{log(Density)}$', fontsize=14)
    ax_3to4.yaxis.set_label_coords(-0.08, 1.02)

    for i in range(len(all_axes)):

        all_axes[i].scatter(slope_list[i],
                            np.log10(density_list[i]),
                            s=5,
                            c=color_list[i],
                            alpha=0.5,
                            edgecolors='none')

        all_axes[i].set_ylim(-8, 0.5)
        all_axes[i].set_xlim(0, 35)

        x = slope_list[i]
        y = density_list[i]

        # draw contours
        # make sure the arrays don't have NaNs
        slope_fin_idx = np.where(np.isfinite(x))[0]
        density_fin_idx = np.where(np.isfinite(y))[0]
        fin_idx = np.intersect1d(slope_fin_idx, density_fin_idx)

        xp = x[fin_idx]
        yp = y[fin_idx]

        counts, xbins, ybins = np.histogram2d(xp,
                                              np.log10(yp),
                                              bins=25,
                                              normed=False)
        # smooth counts to get smoother contours
        kernel = Gaussian2DKernel(x_stddev=1.4)
        counts = convolve(counts, kernel, boundary='extend')

        print("Min and max point number density values in bins:",
              "{:.3}".format(np.min(counts)),
              "{:.3}".format(np.max(counts)))
        diam_bin = diam_bins[i]
        diam_bin_min = diam_bin.split('to')[0]
        diam_bin_max = diam_bin.split('to')[1]
        levels_to_plot, cb_lw, vmin = get_levels_to_plot(diam_bin,
                                                         plottype=plottype)
        norm = mpl.colors.Normalize(vmin=vmin, vmax=max(levels_to_plot))

        c = all_axes[i].contour(counts.transpose(), levels=levels_to_plot, \
            extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()], \
            cmap=cm.viridis, linestyles='solid', linewidths=1, \
            zorder=10, norm=norm)

        # add minor ticks and grid
        all_axes[i].minorticks_on()
        all_axes[i].tick_params('both', width=1, length=3, which='minor')
        all_axes[i].tick_params('both', width=1, length=4.7, which='major')

        if i < 2:
            all_axes[i].set_xticklabels([])

        if i == 1 or i == 3:
            all_axes[i].set_yticklabels([])

        if plottype == 'Nvalue':
            all_axes[i].text(0.73, 0.2, r"$\mathrm{N(}$" + str(diam_bin_min) + r'$\mathrm{)}$', \
                verticalalignment='top', horizontalalignment='left', \
                transform=all_axes[i].transAxes, color='k', size=14)

        elif plottype == 'nooverlap':
            all_axes[i].text(0.67, 0.2, str(diam_bin_min) + r'$\mathrm{\, to\, }$' + str(diam_bin_max) + r'$\mathrm{\,km}$', \
                verticalalignment='top', horizontalalignment='left', \
                transform=all_axes[i].transAxes, color='k', size=14)

    ax_3to4.set_xticklabels(['0', '5', '10', '15', '20', '25', '30', '35'])
    ax_4to5.set_xticklabels(['', '5', '10', '15', '20', '25', '30', '35'])

    # save the figure
    if plottype == 'Nvalue':
        fig.savefig(slope_extdir +
                    'slope_v_density_withcontour_smallgrid_Nvalue.png',
                    dpi=300,
                    bbox_inches='tight')
    elif plottype == 'nooverlap':
        fig.savefig(slope_extdir +
                    'slope_v_density_withcontour_smallgrid_nooverlap.png',
                    dpi=300,
                    bbox_inches='tight')

    return None
Example #28
0
def make_source_mask(data, nsigma, npixels, mask=None, filter_fwhm=None,
                     filter_size=3, kernel=None, sigclip_sigma=3.0,
                     sigclip_iters=5, dilate_size=11):
    """
    Make a source mask using source segmentation and binary dilation.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

        .. note::
            It is recommended that the user convolve the data with
            ``kernel`` and input the convolved data directly into the
            ``data`` parameter. In this case do not input a ``kernel``,
            otherwise the data will be convolved twice.

    nsigma : float
        The number of standard deviations per pixel above the
        ``background`` for which to consider a pixel as possibly being
        part of a source.

    npixels : int
        The number of connected pixels, each greater than ``threshold``,
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    mask : array_like, bool, optional
        A boolean mask with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when computing the image background
        statistics.

    filter_fwhm : float, optional
        Deprecated (use the ``kernel`` keyword).
        The full-width at half-maximum (FWHM) of the Gaussian kernel
        to filter the image before thresholding. ``filter_fwhm``
        and ``filter_size`` are ignored if ``kernel`` is defined.
        ``filter_fwhm`` must be `None` if the input ``data`` are already
        convolved.

    filter_size : float, optional
        Deprecated (use the ``kernel`` keyword).
        The size of the square Gaussian kernel image. Used only if
        ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size``
        are ignored if ``kernel`` is defined. ``filter_size`` must be
        `None` if the input ``data`` are already convolved.

    kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional
        The 2D array of the kernel used to filter the image before
        thresholding. Filtering the image will smooth the noise
        and maximize detectability of objects with a shape similar
        to the kernel. ``kernel`` overrides ``filter_fwhm`` and
        ``filter_size``. ``kernel`` must be `None` if the input ``data``
        are already convolved.

    sigclip_sigma : float, optional
        The number of standard deviations to use as the clipping limit
        when calculating the image background statistics.

    sigclip_iters : int, optional
        The maximum number of iterations to perform sigma clipping, or
        `None` to clip until convergence is achieved (i.e., continue
        until the last iteration clips nothing) when calculating the
        image background statistics.

    dilate_size : int, optional
        The size of the square array used to dilate the segmentation
        image.

    Returns
    -------
    mask : 2D bool `~numpy.ndarray`
        A 2D boolean image containing the source mask.
    """
    from scipy import ndimage

    threshold = detect_threshold(data, nsigma, background=None, error=None,
                                 mask=mask, sigclip_sigma=sigclip_sigma,
                                 sigclip_iters=sigclip_iters)

    if kernel is None and filter_fwhm is not None:
        kernel_sigma = filter_fwhm * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(kernel_sigma, x_size=filter_size,
                                  y_size=filter_size)
    if kernel is not None:
        kernel.normalize()

    segm = detect_sources(data, threshold, npixels, kernel=kernel)
    if segm is None:
        return np.zeros(data.shape, dtype=bool)

    selem = np.ones((dilate_size, dilate_size))
    return ndimage.binary_dilation(segm.data.astype(bool), selem)
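
A minimal usage sketch on synthetic data (the Gaussian blob, noise level, and parameter values are illustrative assumptions; it presumes the photutils and astropy imports used by make_source_mask are available):

import numpy as np

rng = np.random.default_rng(42)
data = rng.normal(0.0, 1.0, size=(101, 101))  # pure-noise background
yy, xx = np.mgrid[:101, :101]
data += 50.0 * np.exp(-((xx - 50)**2 + (yy - 50)**2) / (2 * 3.0**2))  # fake source

mask = make_source_mask(data, nsigma=3, npixels=5, filter_fwhm=3)
print(mask.sum(), 'pixels flagged as belonging to a source')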
Example #29
0
    def find_sources(self,
                     thresh=3.5,
                     extname='sci',
                     extnum=1,
                     dq_mask=None,
                     use_kernel=True,
                     deblend_cont=0.01,
                     kernel_size=5,
                     save=True):
        """ Find sources that are thresh*sigma above background

        Parameters
        ----------
        bkg
        hdr
        thresh
        kernel

        Returns
        -------

        """
        if self.data[f"{extname}{extnum}"] is None:
            self.read_file()

        if use_kernel:
            LOG.info('Creating a 2-D Gaussian kernel')
            kernel = Gaussian2DKernel(1,
                                      x_size=kernel_size,
                                      y_size=kernel_size)
            kernel.normalize()
            kernel = kernel.array
        else:
            LOG.info('Using default kernel')
            kernel = np.ones((kernel_size, kernel_size))

        LOG.info('Generating BKG mesh using SExtractor')
        self.data[f"{extname}{extnum}_bkg"] = \
            sep.Background(self.data[f"{extname}{extnum}"])

        # LOG.info(dq_mask)
        gains = self.data['prhdr']['ATODGN*']
        avg_gain = np.mean(list(gains.values()))
        source_extract_config = {
            'thresh': thresh,
            'err': self.data[f"{extname}{extnum}_bkg"].globalrms,
            'gain': avg_gain,
            'minarea': 11,
            'filter_kernel': kernel,
            'mask': dq_mask,
            'deblend_nthresh': 32,
            'deblend_cont': deblend_cont,
            'clean': False,
            'segmentation_map': True
        }
        LOG.info('Performing global background subtraction to find sources...')
        bkg_subtracted = self.data[f"{extname}{extnum}"] - \
                         self.data[f"{extname}{extnum}_bkg"]
        source_catalog, segmap = sep.extract(data=bkg_subtracted,
                                             **source_extract_config)
        self.data[f"{extname}{extnum}_segmap"] = segmap
        self.source_catalog[f"{extname}{extnum}"] = Table(source_catalog)
        self.source_catalog[f"{extname}{extnum}"]['id'] = \
            np.arange(0, len(source_catalog['x']))
Example #30
0
def cubefit_gen(cube,
                ncomp=2,
                paraname=None,
                modname=None,
                chisqname=None,
                guesses=None,
                errmap11name=None,
                multicore=None,
                mask_function=None,
                snr_min=3.0,
                linename="oneone",
                momedgetrim=True,
                saveguess=False,
                **kwargs):
    '''
    Perform an n-velocity-component fit on the GAS ammonia 1-1 data.
    (This should be the function to call for all future code once it has
    proven reliable.)
    # note: the method can probably be renamed to cubefit()

    Parameters
    ----------
    cube : str or SpectralCube
        The file name of the ammonia 1-1 cube, or a SpectralCube object.
    ncomp : int
        The number of velocity components to fit. Default is 2.
    paraname : str
        The output file name for the fitted parameter cube.

    Returns
    -------
    pcube : pyspeckit.cubes.SpectralCube.Cube
        Pyspeckit cube object containing both the fit and the original data cube.
    '''

    if hasattr(cube, 'spectral_axis'):
        pcube = pyspeckit.Cube(cube=cube)

    else:
        cubename = cube
        cube = SpectralCube.read(cubename)
        pcube = pyspeckit.Cube(filename=cubename)

    pcube.unit = "K"

    # The following check on the rest frequency may not be necessary for GAS,
    # but better safe than sorry.
    # note: this assumes the data cube has the right units
    if np.isnan(cube._wcs.wcs.restfrq):
        # Specify the rest frequency, which is not present
        cube = cube.with_spectral_unit(u.Hz,
                                       rest_value=freq_dict[linename] * u.Hz)
    cube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')

    if np.isnan(pcube.wcs.wcs.restfrq):
        # Specify the rest frequency, which is not present
        pcube.xarr.refX = freq_dict[linename] * u.Hz
    pcube.xarr.velocity_convention = 'radio'

    # always register the fitter just in case different lines are used
    fitter = ammv.nh3_multi_v_model_generator(n_comp=ncomp,
                                              linenames=[linename])
    pcube.specfit.Registry.add_fitter('nh3_multi_v', fitter, fitter.npars)
    print "number of parameters is {0}".format(fitter.npars)
    print "the line to fit is {0}".format(linename)

    # Specify a width for the expected velocity range in the data
    #v_peak_hwidth = 3.0 # km/s (should be sufficient for GAS Orion, but may not be enough for KEYSTONE)
    v_peak_hwidth = 4.0  # km/s (should be sufficient for GAS Orion, but may not be enough for KEYSTONE)

    if errmap11name is not None:
        errmap11 = fits.getdata(errmap11name)
    else:
        # a quick way to estimate the RMS, valid as long as noise dominates most channels
        mask_finite = np.isfinite(cube._data)
        errmap11 = mad_std(cube._data[mask_finite], axis=0)
        print "median rms: {0}".format(np.nanmedian(errmap11))

    snr = cube.filled_data[:].value / errmap11
    peaksnr = np.nanmax(snr, axis=0)

    # The SNR map will inevitably be noisy, so smooth it a little.
    kernel = Gaussian2DKernel(1)
    peaksnr = convolve(peaksnr, kernel)

    # trim the edges by 3 pixels to guess the location of the peak emission
    footprint_mask = np.any(np.isfinite(cube._data), axis=0)

    if np.logical_and(footprint_mask.size > 1000, momedgetrim):
        print "triming the edges to make moment maps"
        footprint_mask = binary_erosion(footprint_mask, disk(3))

    # the following function is copied directly from GAS
    def default_masking(snr, snr_min=5.0):
        planemask = (snr > snr_min)
        if planemask.size > 100:
            planemask = remove_small_objects(planemask, min_size=40)
            planemask = opening(planemask, disk(1))
        return (planemask)

    if 'maskmap' in kwargs:
        planemask = kwargs['maskmap']
    elif mask_function is None:
        planemask = default_masking(peaksnr, snr_min=snr_min)
    else:
        planemask = mask_function(peaksnr, snr_min=snr_min)

    print "planemask size: {0}, shape: {1}".format(planemask[planemask].size,
                                                   planemask.shape)

    # masking
    mask = np.isfinite(cube._data) * planemask * footprint_mask

    print "mask size: {0}, shape: {1}".format(mask[mask].size, mask.shape)

    maskcube = cube.with_mask(mask.astype(bool))
    maskcube = maskcube.with_spectral_unit(u.km / u.s,
                                           velocity_convention='radio')

    if guesses is not None:
        v_guess = guesses[::4]
        v_guess[v_guess == 0] = np.nan
    else:
        v_guess = np.nan

    if np.isfinite(v_guess).sum() > 0:
        v_guess = v_guess[np.isfinite(v_guess)]
        v_median = np.median(v_guess)
        print "The median of the user provided velocities is: {0}".format(
            v_median)
        m0, m1, m2 = main_hf_moments(maskcube,
                                     window_hwidth=v_peak_hwidth,
                                     v_atpeak=v_median)
    else:
        m0, m1, m2 = main_hf_moments(maskcube, window_hwidth=v_peak_hwidth)
        v_median = np.median(m1[np.isfinite(m1)])
        print "median velocity: {0}".format(v_median)

        if False:
            # save the moment maps for diagnostic purposes
            hdr_new = copy.deepcopy(pcube.header)
            hdr_new['CDELT3'] = 1
            hdr_new['CTYPE3'] = 'FITPAR'
            hdr_new['CRVAL3'] = 0
            hdr_new['CRPIX3'] = 1

            savename = "{0}_moments.fits".format(
                os.path.splitext(paraname)[0], "parameter_maps")
            fitcubefile = fits.PrimaryHDU(data=np.array([m0, m1, m2]),
                                          header=hdr_new)
            fitcubefile.writeto(savename, overwrite=True)

    # Remove the NaN values so that np.nanargmax(m0) operates smoothly.
    # (Not sure this is the best way around the sum vs. nansum issue.)
    m0[np.isnan(m0)] = 0.0

    # define acceptable v range based on the provided or determined median velocity
    vmax = v_median + v_peak_hwidth
    vmin = v_median - v_peak_hwidth

    # find the location of the peak signal (to determine the first pixel to fit if nearest neighbour method is used)
    peakloc = np.nanargmax(m0)
    ymax, xmax = np.unravel_index(peakloc, m0.shape)

    # set the fit parameter limits (consistent with GAS DR1)
    Texmin = 3.0  # K; a more reasonable lower limit (5 K T_kin, 1e3 cm^-3 density, 1e13 cm^-2 column, 3km/s sigma)
    Texmax = 40  # K; DR1 T_k for Orion A is < 35 K. T_k = 40 at 1e5 cm^-3, 1e15 cm^-2, and 0.1 km/s yields Tex = 37K
    sigmin = 0.07  # km/s
    sigmax = 2.5  # km/s; for Larson's law, a 10pc cloud has sigma = 2.6 km/s
    taumax = 100.0  # a reasonable upper limit for GAS data. At 10K and 1e5 cm^-3 & 3e15 cm^-2 -> 70
    taumin = 0.2  # note: at 1e3 cm^-3, 1e13 cm^-2, 1 km/s linewidth, 40 K -> 0.15
    eps = 0.001  # a small perturbation that can be used in guesses

    # get the guesses based on moment maps
    # Tex and tau guesses are chosen to reflect low-density, diffuse gas that is likely to have low SNR
    gg = moment_guesses(m1, m2, ncomp, sigmin=sigmin, moment0=m0)

    if guesses is None:
        guesses = gg

    else:
        # fill in the blanks with moment guesses
        guesses[guesses == 0] = np.nan
        gmask = np.isfinite(guesses)
        guesses[~gmask] = gg[~gmask]

        # fill in the failed sigma guesses with moment guesses
        gmask = guesses[1::4] < sigmin
        guesses[1::4][gmask] = gg[1::4][gmask]

        print "user provided guesses accepted"

    # The guesses should be fine in the first case, but just in case, make sure the guesses are confined within the
    # appropriate limits
    guesses[::4][guesses[::4] > vmax] = vmax
    guesses[::4][guesses[::4] < vmin] = vmin
    guesses[1::4][guesses[1::4] > sigmax] = sigmax
    guesses[1::4][guesses[1::4] < sigmin] = sigmin + eps
    guesses[2::4][guesses[2::4] > Texmax] = Texmax
    guesses[2::4][guesses[2::4] < Texmin] = Texmin
    guesses[3::4][guesses[3::4] > taumax] = taumax
    guesses[3::4][guesses[3::4] < taumin] = taumin

    if saveguess:
        # save the guesses for diagnostic purposes
        hdr_new = copy.deepcopy(pcube.header)
        hdr_new['CDELT3'] = 1
        hdr_new['CTYPE3'] = 'FITPAR'
        hdr_new['CRVAL3'] = 0
        hdr_new['CRPIX3'] = 1

        savedir = "{0}/{1}".format(path.dirname(paraname), "guesses")

        try:
            os.makedirs(savedir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        savename = "{0}_guesses.fits".format(
            path.splitext(paraname)[0], "parameter_maps")
        savename = "{0}/{1}".format(savedir, path.basename(savename))

        fitcubefile = fits.PrimaryHDU(data=guesses, header=hdr_new)
        fitcubefile.writeto(savename, overwrite=True)

    # set some of the fiteach() inputs to those used in the GAS DR1 reduction
    if 'integral' not in kwargs:
        kwargs['integral'] = False

    if 'verbose_level' not in kwargs:
        kwargs['verbose_level'] = 3

    if 'signal_cut' not in kwargs:
        kwargs['signal_cut'] = 2

    # Now fit the cube. (Note: the function inputs are consistent with GAS DR1 whenever possible)
    print('start fit')

    # use SNR masking if not provided
    if 'maskmap' not in kwargs:
        print("mask mask!")
        kwargs['maskmap'] = planemask * footprint_mask

    if np.sum(kwargs['maskmap']) < 1:
        print("[WARNING]: maskmap has no valid pixels; no fitting will be performed")
        return pcube
    elif np.sum(np.isfinite(guesses)) < 1:
        print("[WARNING]: guesses contains no finite values; no fitting will be performed")
        return pcube

    pcube.fiteach(fittype='nh3_multi_v',
                  guesses=guesses,
                  start_from_point=(xmax, ymax),
                  use_neighbor_as_guess=False,
                  limitedmax=[True, True, True, True] * ncomp,
                  maxpars=[vmax, sigmax, Texmax, taumax] * ncomp,
                  limitedmin=[True, True, True, True] * ncomp,
                  minpars=[vmin, sigmin, Texmin, taumin] * ncomp,
                  multicore=multicore,
                  **kwargs)

    if paraname is not None:
        save_pcube(pcube, paraname, ncomp=ncomp)

    if modname is not None:
        model = SpectralCube(pcube.get_modelcube(),
                             pcube.wcs,
                             header=cube.header)
        model.write(modname, overwrite=True)

    if chisqname is not None:
        chisq = get_chisq(cube, pcube.get_modelcube(), expand=20)
        chisqfile = fits.PrimaryHDU(data=chisq,
                                    header=cube.wcs.celestial.to_header())
        chisqfile.writeto(chisqname, overwrite=True)

    return pcube
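
A minimal usage sketch; the file names are placeholders, and the call assumes the module-level imports and helpers used above (pyspeckit, spectral_cube, the ammonia model generator, etc.) are available:

pcube = cubefit_gen('nh3_11_cube.fits',           # placeholder input cube
                    ncomp=2,
                    paraname='nh3_11_para.fits',  # fitted parameter cube
                    modname='nh3_11_model.fits',  # best-fit model cube
                    multicore=4)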