Example #1
def find_ellipse(img, mask, background):
    """
    Compute properties of an image (basically fit a 2D Gaussian) and
    determine the corresponding elliptical aperture. 
    
    inputs:
    -------
    filename: string
    
    r: float
        isophotal extent (multiplied by semi-major axes of fitted
        gaussian to determine the elliptical aperture)
        
    extents: array-like, optional
        xmin, xmax, ymin, ymax of sub-image
    """
    cprops = morphology.data_properties(img - background, mask=(mask == 0),
                                        background=background)
    # summary table for inspection only; define the columns it should carry
    columns = ['id', 'xcentroid', 'ycentroid', 'semimajor_axis_sigma',
               'semiminor_axis_sigma', 'orientation']
    tbl = photutils.properties_table(cprops, columns=columns)
    # print(tbl)
    position = (cprops.xcentroid.value, cprops.ycentroid.value)
    a = cprops.semimajor_axis_sigma.value
    b = cprops.semiminor_axis_sigma.value
    theta = cprops.orientation.value
    return position, a, b, theta
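
A minimal usage sketch for the function above; the synthetic image, mask, and background values below are assumptions for illustration, and ``r`` follows the isophotal-extent convention used elsewhere in these examples:

import numpy as np
from photutils import EllipticalAperture

# synthetic test frame: one Gaussian blob on a flat background of 50 counts
yy, xx = np.mgrid[0:101, 0:101]
img = 50.0 + 1000.0 * np.exp(-((xx - 60.0)**2 + (yy - 40.0)**2) / (2.0 * 3.0**2))
mask = np.ones_like(img)      # find_ellipse masks pixels where mask == 0
background = 50.0

position, a, b, theta = find_ellipse(img, mask, background)
r = 3.0                       # isophotal extent multiplier
aperture = EllipticalAperture(position, a * r, b * r, theta=theta)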
Example #2
    def _find_star_properties(self, data, median, mask, star_coo):
        self.info('Finding star properties started')
        sigma = self.config_section.get('fwhm') * gaussian_fwhm_to_sigma
        kernel = Gaussian2DKernel(sigma,
                                  x_size=self.config_section.get('kernel_x'),
                                  y_size=self.config_section.get('kernel_y'))
        kernel.normalize()
        data[mask] = 0
        segm = detect_sources(data,
                              median *
                              self.config_section.get('detect_threshold'),
                              npixels=self.config_section.get('npixels'),
                              filter_kernel=kernel)
        properties = properties_table(
            source_properties(data - np.uint64(median), segm),
            columns=['id', 'xcentroid', 'ycentroid', 'source_sum',
                     'semimajor_axis_sigma', 'semiminor_axis_sigma',
                     'orientation'])

        self.info('Found star properties')
        self.info(properties)

        if len(properties) > 1:
            self.warning('More than one object has been found')
            properties = self._find_nearest_object(
                star_coo, properties, data.shape)
            return properties
        else:
            self.info('Finding star properties finished')
            return properties[0]
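
For reference, the ``gaussian_fwhm_to_sigma`` constant used above is just 1 / (2 * sqrt(2 * ln 2)) ≈ 0.4247; a quick sanity check:

import numpy as np
from astropy.stats import gaussian_fwhm_to_sigma

# sigma = FWHM / (2 * sqrt(2 * ln 2)) ~= FWHM / 2.3548
assert np.isclose(gaussian_fwhm_to_sigma, 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))))
print(2.0 * gaussian_fwhm_to_sigma)  # sigma ~= 0.8493 pixels for a 2-pixel FWHM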
Example #3
def source_find(img, ota, inst, nbg_std=10.0):
    """
    This function will find sources on an OTA using the detect_sources module
    from photutils. This will return of csv file of the sources found with the
    x,y,Ra,Dec,source_sum,max_value, and elongation of the source. The
    elongation parameter is semimajor_axis / semiminor_axis.
    This output is needed for the source_xy function. This function is set
    to work on the reprojected otas.

    Parameters
    ----------
    img : str
        Name of image
    ota : str
        Name of OTA
    inst : str
        Version of ODI used, ``podi`` or ``5odi``
    nbg_std : float
        Multiplier to the standard deviation of the background. It has a
        default value of ``10`` so that only bright sources are detected.

    Note
    ----
    This function produces a ``csv`` file in ``odi.sourcepath`` with the
    following naming convention ``'source_'+ota+'.'+img.base()+'.csv'``.

    """
    image = odi.reprojpath + 'reproj_' + ota + '.' + img.stem()
    QR_raw = odi.fits.open(image)
    # hdu_ota = QR_raw[0]

    hdu_ota = odi.tan_header_fix(QR_raw[0])

    w = odi.WCS(hdu_ota.header)
    # needed to remind astropy that the header says RADESYS=ICRS
    # your mileage may vary (logic probably needed here to handle cases)
    w.wcs.radesys = 'ICRS'
    # if inst == '5odi':
    #     w.wcs.ctype = ["RA---TPV", "DEC--TPV"]
    bg_mean, bg_median, bg_std = odi.mask_ota(img, ota, reproj=True)
    threshold = bg_median + (bg_std * nbg_std)
    print(bg_mean, bg_median, bg_std)
    segm_img = detect_sources(hdu_ota.data, threshold, npixels=20)
    source_props = source_properties(hdu_ota.data, segm_img, wcs=w)

    columns = [
        'id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
        'dec_icrs_centroid', 'source_sum', 'max_value', 'elongation'
    ]
    source_tbl = properties_table(source_props, columns=columns)
    source_tbl_df = source_tbl.to_pandas()

    outputfile = odi.sourcepath + 'source_' + ota + '.' + img.base() + '.csv'

    source_tbl_df.to_csv(outputfile, index=False)
    QR_raw.close()
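
The ``ra_icrs_centroid``/``dec_icrs_centroid`` columns are specific to older photutils releases; if they are unavailable, the pixel centroids can be converted with the same WCS. A hedged sketch using the ``w`` and ``source_tbl`` objects from the function above:

import numpy as np

# pixel -> ICRS sky coordinates (origin=0 for the numpy/photutils pixel convention)
ra, dec = w.all_pix2world(np.asarray(source_tbl['xcentroid']),
                          np.asarray(source_tbl['ycentroid']), 0)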
Example #4
def find_centroid(data):
    """
    find the centroid again after the image was rotated
    """

    sigma_clip = SigmaClip(sigma=3., iters=10)
    bkg_estimator = MedianBackground()
    bkg = Background2D(data, (25, 25),
                       filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)

    threshold = bkg.background + (3. * bkg.background_rms)

    sigma = 2.0 * gaussian_fwhm_to_sigma  # FWHM = 2.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)

    props = source_properties(data, segm)
    tbl = properties_table(props)

    my_min = np.inf  # distance of the closest source to the image center so far

    x_shape = float(data.shape[0])
    y_shape = float(data.shape[1])

    r = 3.  # approximate isophotal extent
    apertures = []
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        #print(position)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))
        my_dist = np.sqrt((prop.xcentroid.value - x_shape / 2.)**2 +
                          (prop.ycentroid.value - y_shape / 2.)**2)
        if (my_dist < my_min):
            my_label = prop.id - 1
            my_min = my_dist

    mytheta = props[my_label].orientation.value
    mysize = int(np.round(r * props[my_label].semimajor_axis_sigma.value))
    my_x = props[my_label].xcentroid.value
    my_y = props[my_label].ycentroid.value
    return my_x, my_y, mytheta, mysize
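
The nearest-to-center search above can be collapsed into a single ``argmin`` over the properties table; a sketch (note the original pairs ``xcentroid`` with ``shape[0]``, which only matters for non-square images):

import numpy as np

# squared distance of every centroid from the image center, then pick the closest
dx = np.asarray(tbl['xcentroid']) - data.shape[0] / 2.0
dy = np.asarray(tbl['ycentroid']) - data.shape[1] / 2.0
my_label = int(np.argmin(dx**2 + dy**2))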
Example #5
def grid_smooth(i_ra_f, i_dec_f, fwhm, width, height):
    # bin the filtered stars into a grid with pixel size XXX
    # print "Binning for m-M =",dm
    # bins = 165
    # width = 30
    bins_h = int(height * 60. / 8.)
    bins_w = int(width * 60. / 8.)

    grid, xedges, yedges = np.histogram2d(i_dec_f, i_ra_f, bins=[bins_h,bins_w], range=[[0,height],[0,width]])
    hist_points = list(zip(xedges, yedges))  # unused below; kept for reference

    sig = ((bins_w / width) * fwhm) / 2.355  # FWHM -> sigma (2.355 ~ 2*sqrt(2 ln 2))
    pltsig = fwhm / 2.0

    # convolve the grid with a gaussian
    grid_gaus = ndimage.filters.gaussian_filter(grid, sig, mode='constant', cval=0)
    S_th = 3.0  # significance threshold in sigma

    grid_mean = np.mean(grid_gaus)
    grid_sigma = np.std(grid_gaus)
    S = (grid_gaus-grid_mean)/grid_sigma

    above_th = [(int(i),int(j)) for i in range(len(S)) for j in range(len(S[i])) if (S[i][j] >= S_th)]

    segm = detect_sources(S, 2.0, npixels=5)
    props = source_properties(S, segm)
    columns = ['id', 'maxval_xpos', 'maxval_ypos', 'max_value', 'area']
    tbl = properties_table(props, columns=columns)
    # print tbl
    # rand_cmap = random_cmap(segm.max + 1, random_state=12345)

    # find the maximum point in the grid and center the circle there
    x_cent, y_cent = np.unravel_index(grid_gaus.argmax(),grid_gaus.shape)
    x_cent_S, y_cent_S = np.unravel_index(S.argmax(),S.shape)
    # print 'Max of S located at:','('+'{0:6.3f}'.format(y_cent_S)+','+'{0:6.3f}'.format(x_cent_S)+')'
    # print 'Value of S at above:','{0:6.3f}'.format(S[x_cent_S][y_cent_S])
    # print 'Number of bins above S_th: {0:4d}'.format(len(above_th))
    return xedges, x_cent, yedges, y_cent, S, x_cent_S, y_cent_S, pltsig, tbl, segm
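
The ``S`` array above is simply a per-bin z-score of the smoothed density grid; a self-contained check with synthetic star positions (all values here are assumptions for illustration):

import numpy as np
from scipy import ndimage

rng = np.random.RandomState(42)
i_ra_f = rng.uniform(0.0, 30.0, 5000)   # synthetic RA offsets (arbitrary units)
i_dec_f = rng.uniform(0.0, 30.0, 5000)  # synthetic Dec offsets
grid, _, _ = np.histogram2d(i_dec_f, i_ra_f, bins=[225, 225],
                            range=[[0, 30], [0, 30]])
grid_gaus = ndimage.gaussian_filter(grid, 2.0, mode='constant', cval=0)
S = (grid_gaus - grid_gaus.mean()) / grid_gaus.std()  # z-score significance map
print((S >= 3.0).sum(), 'bins above the 3-sigma threshold')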
Example #6
def calc_fwhm_on_bright_star(image_file, verbose=True, fwhm_init=2.0):
    """Calculate the FWHM on a single bright star (either open or closed loops).

    image_file -- either FITS or PGM
    verbose -- (def=True) print the resulting source table
    fwhm_init -- (def=2) pixels for FWHM initial guess

    """

    img = load_image(image_file)

    # Calculate the background
    bkg = photutils.Background(img,
                               img.shape,
                               filter_shape=(1, 1),
                               method='median')

    threshold = bkg.background + (30.0 * bkg.background_rms)

    sigma = 2.0 * gaussian_fwhm_to_sigma  # FWHM = 2. pixels
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(img, threshold, npixels=5, filter_kernel=kernel)

    props = source_properties(img, segm)
    tbl = properties_table(props)

    # Check for junk stars (cosmic rays)
    idx = np.where((tbl['semimajor_axis_sigma'] > 1)
                   & (tbl['semiminor_axis_sigma'] > 1))[0]
    tbl = tbl[idx]

    tbl['image_name'] = image_file

    if verbose:
        reformat_source_table(tbl)
        print_source_table(tbl)

    return tbl
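
The returned table carries Gaussian sigmas rather than a FWHM; converting via the geometric mean of the two axes (the same convention as Example 13 below) gives a per-source FWHM estimate:

import numpy as np
from astropy.stats import gaussian_sigma_to_fwhm

# FWHM of the equivalent circular Gaussian: 2.3548 * sqrt(sigma_a * sigma_b)
tbl['fwhm'] = gaussian_sigma_to_fwhm * np.sqrt(
    tbl['semimajor_axis_sigma'] * tbl['semiminor_axis_sigma'])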
Example #7
def source_find(img,ota,inst):
    """
    This function will find sources on an OTA
    using the detect_sources module from photutils.
    This will return of csv file of the sources found
    with the x,y,Ra,Dec,source_sum,max_value, and
    elongation of the source. The elongation parameter is
    semimajor_axis / semiminor_axis. This output is needed
    for the source_xy function.
    """
    image = odi.reprojpath+'reproj_'+ota+'.'+str(img[16:])
    QR_raw = odi.fits.open(image)
    hdu_ota = QR_raw[0]

    if inst == 'podi':
        pvlist = hdu_ota.header['PV*']
        for pv in pvlist:
            tpv = 'T'+pv
            hdu_ota.header.rename_keyword(pv, tpv, force=False)
    w = odi.WCS(hdu_ota.header)
    #w.wcs.ctype = ["RA---TPV", "DEC--TPV"]
    bg_mean,bg_median,bg_std = odi.mask_ota(img,ota,reproj=True)
    threshold = bg_median + (bg_std * 5.)
    print(bg_mean, bg_median, bg_std)
    segm_img = detect_sources(hdu_ota.data, threshold, npixels=20)
    source_props = source_properties(hdu_ota.data,segm_img,wcs=w)

    columns = ['id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
               'dec_icrs_centroid', 'source_sum', 'max_value', 'elongation']
    source_tbl = properties_table(source_props,columns=columns)
    source_tbl_df = source_tbl.to_pandas()

    outputfile = odi.sourcepath+'source_'+ota+'.'+str(img[16:-5])+'.csv'

    source_tbl_df.to_csv(outputfile,index=False)
    QR_raw.close()
Example #8
def make_source_catalog(model, kernel_fwhm, kernel_xsize, kernel_ysize,
                        snr_threshold, npixels, deblend_nlevels=32,
                        deblend_contrast=0.001, deblend_mode='exponential',
                        connectivity=8, deblend=False):
    """
    Create a final catalog of source photometry and morphologies.

    Parameters
    ----------
    model : `DrizProductModel`
        The input `DrizProductModel` of a single drizzled image.  The
        input image is assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    kernel_xsize : odd int
        The size in the x dimension (columns) of the kernel array.

    kernel_ysize : odd int
        The size in the y dimension (rows) of the kernel array.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    npixels : int
        The number of connected pixels, each greater than the threshold
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    deblend_nlevels : int, optional
        The number of multi-thresholding levels to use for deblending
        sources.  Each source will be re-thresholded at
        ``deblend_nlevels``, spaced exponentially or linearly (see the
        ``deblend_mode`` keyword), between its minimum and maximum
        values within the source segment.

    deblend_contrast : float, optional
        The fraction of the total (blended) source flux that a local
        peak must have to be considered as a separate object.
        ``deblend_contrast`` must be between 0 and 1, inclusive.  If
        ``deblend_contrast = 0`` then every local peak will be made a
        separate object (maximum deblending).  If ``deblend_contrast =
        1`` then no deblending will occur.  The default is 0.001, which
        will deblend sources with a magnitude difference of about 7.5
        (since -2.5 * log10(0.001) = 7.5).

    deblend_mode : {'exponential', 'linear'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``deblend_nlevels`` keyword)
        when deblending sources.

    connectivity : {4, 8}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source.  The options are 4 or 8
        (default).  4-connected pixels touch along their edges.
        8-connected pixels touch along their edges or corners.  For
        reference, SExtractor uses 8-connected pixels.

    deblend : bool, optional
        Whether to deblend overlapping sources.  Source deblending
        requires scikit-image.

    Returns
    -------
    catalog : `~astropy.table.QTable`
        An astropy QTable containing the source photometry and
        morphologies.
    """

    if not isinstance(model, DrizProductModel):
        raise ValueError('The input model must be a DrizProductModel.')

    # Remove until model.wht contains an IVM map
    # Calculate "background-only" error assuming the weight image is an
    # inverse-variance map (IVM).  The weight image is clipped because it
    # may contain zeros.
    # bkg_error = np.sqrt(1.0 / np.clip(model.wht, 1.0e-20, 1.0e20))
    # threshold = snr_threshold * bkg_error

    # Estimate the 1-sigma noise in the image empirically because model.wht
    # does not yet contain an IVM map
    mask = (model.wht == 0)
    data_mean, data_median, data_std = sigma_clipped_stats(
        model.data, mask=mask, sigma=3.0, iters=10)
    threshold = data_median + (data_std * snr_threshold)

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=kernel_xsize, y_size=kernel_ysize)
    kernel.normalize()

    segm = photutils.detect_sources(model.data, threshold, npixels=npixels,
                                    filter_kernel=kernel,
                                    connectivity=connectivity)

    # source deblending requires scikit-image
    if deblend:
        segm = photutils.deblend_sources(model.data, segm, npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=deblend_nlevels,
                                         contrast=deblend_contrast,
                                         mode=deblend_mode,
                                         connectivity=connectivity,
                                         relabel=True)

    # Calculate total error, including source Poisson noise.
    # This calculation assumes that the data and bkg_error images are in
    # units of electron/s.  Poisson noise is not included for pixels
    # where data < 0.
    exptime = model.meta.resample.product_exposure_time    # total exptime
    #total_error = np.sqrt(bkg_error**2 + np.maximum(model.data / exptime, 0))
    total_error = np.sqrt(data_std**2 + np.maximum(model.data / exptime, 0))

    wcs = model.get_fits_wcs()
    source_props = photutils.source_properties(
        model.data, segm, error=total_error, filter_kernel=kernel, wcs=wcs)

    columns = ['id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
               'dec_icrs_centroid', 'area', 'source_sum',
               'source_sum_err', 'semimajor_axis_sigma',
               'semiminor_axis_sigma', 'orientation']
    catalog = photutils.properties_table(source_props, columns=columns)

    # convert orientation to degrees
    catalog = QTable(catalog)
    orient_deg = catalog['orientation'].to(u.deg)
    catalog.replace_column('orientation', orient_deg)

    # define orientation position angle
    rot = _get_rotation(wcs)
    catalog['orientation_sky'] = ((270. - rot +
                                   catalog['orientation'].value) * u.deg)

    # define AB mag and AB mag error
    pixelarea = model.meta.photometry.pixelarea_arcsecsq
    if pixelarea is None:
        micro_Jy = 0.0
    else:
        micro_Jy = (catalog['source_sum'] *
                    model.meta.photometry.conversion_microjanskys *
                    model.meta.photometry.pixelarea_arcsecsq)
    # micro_Jy is a per-source array; an `if` on it would raise an
    # ambiguous-truth-value error, so compute abmag element-wise instead
    with np.errstate(invalid='ignore', divide='ignore'):
        abmag = np.where(micro_Jy > 0.0, -2.5 * np.log10(micro_Jy) + 23.9, 0.)
    catalog['abmag'] = abmag

    # assuming SNR >> 1 (otherwise abmag_error is asymmetric):
    catalog['abmag_error'] = (2.5 * np.log10(np.e) *
                              catalog['source_sum_err'] /
                              catalog['source_sum'])

    return catalog
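
The ``abmag_error`` expression encodes the high-SNR limit of magnitude error propagation, sigma_m = (2.5 / ln 10) * sigma_F / F ≈ 1.0857 / SNR; a quick numeric check:

import numpy as np

snr = 50.0
# 2.5 * log10(e) = 2.5 / ln(10) ~= 1.0857
print(2.5 * np.log10(np.e) / snr)  # ~0.0217 mag at SNR = 50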
Example #9
def simple_photometry(lst_of_fits_files, dither):
    """
    Takes a list of FPC exposure images, makes a folder to hold the
    segmentation results, and returns a dictionary of results from the
    segmentation photometry.

    INPUT: list of exposure IDs that identify the FPC images
    OUTPUT: python dictionary
        KEY: exposure ID
        ITEM: information about the photometry
              - path: image file name
              - area: area of the detected segment in pixels
              - sum: sum of flux in pixels
              - exptime: from FITS header (requested exposure time)
              - background: mean background of image
              - x: x_centroid pixel value
              - y: y_centroid pixel value
              - flux: sum / exptime
    """


    d = {}
    print(lst_of_fits_files)
    files = np.array(lst_of_fits_files)  # avoid shadowing the builtin `list`
    print(len(files))
    for x in files:
        if dither == 'tel':
            id = x
            d[id] = {}
            filename = 'PROTODESI_FPC_%08d.fits' % x
        if dither == 'pos':
            filename = x
            id = os.path.splitext(x)[0][-3:]
            d[id] = {}
        print(filename)
        path_file_abs = directory+'/'+filename
        d[id]['path'] = filename
        
        [segm, props_list_return] = segmentation_photometry(path_file_abs,
                                                bkg_sigma = 3.0,
                                                source_snr = 3.0,
                                                fwhm_kernel = 2.0,
                                                x_size_kernel = 3,
                                                y_size_kernel = 3,
                                                clobber = False)

        FIBRE_IDS = {0: (1407, 1208), 1: (1209, 1072), 2: (1093, 1864)}  # fibre centroid positions
        DIST_THRESHOLD = 20  # pixels; not used in this function

        expreq = fits.open(path_file_abs)[0].header['EXPREQ']

        data = np.array(properties_table(
            props_list_return,
            columns=('area', 'source_sum', 'background_mean',
                     'xcentroid', 'ycentroid')))
        area = data['area']
        sume = data['source_sum']
        i = np.where(area == max(area))
        flux = sume[i]/expreq

        d[id]['area'] = area[i]
        d[id]['sum'] = sume[i]
        d[id]['exptime'] = expreq
        d[id]['background'] = data['background_mean'][i]
        d[id]['x'] = data['xcentroid'][i]
        d[id]['y'] = data['ycentroid'][i]
        d[id]['flux'] = flux
        

    return d
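
A side note on the largest-segment selection inside the loop above: ``np.where(area == max(area))`` returns a tuple of index arrays, while ``np.argmax`` gives the single index directly when one match is enough:

import numpy as np

i = int(np.argmax(data['area']))       # index of the largest segment
flux = data['source_sum'][i] / expreq  # same flux definition as above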
Example #10
def make_source_catalog(model,
                        kernel_fwhm,
                        kernel_xsize,
                        kernel_ysize,
                        snr_threshold,
                        npixels,
                        deblend_nlevels=32,
                        deblend_contrast=0.001,
                        deblend_mode='exponential',
                        connectivity=8,
                        deblend=False):
    """
    Create a final catalog of source photometry and morphologies.

    Parameters
    ----------
    model : `DrizProductModel`
        The input `DrizProductModel` of a single drizzled image.  The
        input image is assumed to be background subtracted.

    kernel_fwhm : float
        The full-width at half-maximum (FWHM) of the 2D Gaussian kernel
        used to filter the image before thresholding.  Filtering the
        image will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.

    kernel_xsize : odd int
        The size in the x dimension (columns) of the kernel array.

    kernel_ysize : odd int
        The size in the y dimension (rows) of the kernel array.

    snr_threshold : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    npixels : int
        The number of connected pixels, each greater than the threshold
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    deblend_nlevels : int, optional
        The number of multi-thresholding levels to use for deblending
        sources.  Each source will be re-thresholded at
        ``deblend_nlevels``, spaced exponentially or linearly (see the
        ``deblend_mode`` keyword), between its minimum and maximum
        values within the source segment.

    deblend_contrast : float, optional
        The fraction of the total (blended) source flux that a local
        peak must have to be considered as a separate object.
        ``deblend_contrast`` must be between 0 and 1, inclusive.  If
        ``deblend_contrast = 0`` then every local peak will be made a
        separate object (maximum deblending).  If ``deblend_contrast =
        1`` then no deblending will occur.  The default is 0.001, which
        will deblend sources with a magnitude difference of about 7.5
        (since -2.5 * log10(0.001) = 7.5).

    deblend_mode : {'exponential', 'linear'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``deblend_nlevels`` keyword)
        when deblending sources.

    connectivity : {4, 8}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source.  The options are 4 or 8
        (default).  4-connected pixels touch along their edges.
        8-connected pixels touch along their edges or corners.  For
        reference, SExtractor uses 8-connected pixels.

    deblend : bool, optional
        Whether to deblend overlapping sources.  Source deblending
        requires scikit-image.

    Returns
    -------
    catalog : `~astropy.table.QTable`
        An astropy QTable containing the source photometry and
        morphologies.
    """

    if not isinstance(model, DrizProductModel):
        raise ValueError('The input model must be a DrizProductModel.')

    # Remove until model.wht contains an IVM map
    # Calculate "background-only" error assuming the weight image is an
    # inverse-variance map (IVM).  The weight image is clipped because it
    # may contain zeros.
    # bkg_error = np.sqrt(1.0 / np.clip(model.wht, 1.0e-20, 1.0e20))
    # threshold = snr_threshold * bkg_error

    # Estimate the 1-sigma noise in the image empirically because model.wht
    # does not yet contain an IVM map
    mask = (model.wht == 0)
    data_mean, data_median, data_std = sigma_clipped_stats(model.data,
                                                           mask=mask,
                                                           sigma=3.0,
                                                           iters=10)
    threshold = data_median + (data_std * snr_threshold)

    sigma = kernel_fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=kernel_xsize, y_size=kernel_ysize)
    kernel.normalize()

    segm = photutils.detect_sources(model.data,
                                    threshold,
                                    npixels=npixels,
                                    filter_kernel=kernel,
                                    connectivity=connectivity)

    # source deblending requires scikit-image
    if deblend:
        segm = photutils.deblend_sources(model.data,
                                         segm,
                                         npixels=npixels,
                                         filter_kernel=kernel,
                                         nlevels=deblend_nlevels,
                                         contrast=deblend_contrast,
                                         mode=deblend_mode,
                                         connectivity=connectivity,
                                         relabel=True)

    # Calculate total error, including source Poisson noise.
    # This calculation assumes that the data and bkg_error images are in
    # units of electron/s.  Poisson noise is not included for pixels
    # where data < 0.
    exptime = model.meta.resample.product_exposure_time  # total exptime
    #total_error = np.sqrt(bkg_error**2 + np.maximum(model.data / exptime, 0))
    total_error = np.sqrt(data_std**2 + np.maximum(model.data / exptime, 0))

    wcs = model.get_fits_wcs()
    source_props = photutils.source_properties(model.data,
                                               segm,
                                               error=total_error,
                                               filter_kernel=kernel,
                                               wcs=wcs)

    columns = [
        'id', 'xcentroid', 'ycentroid', 'ra_icrs_centroid',
        'dec_icrs_centroid', 'area', 'source_sum', 'source_sum_err',
        'semimajor_axis_sigma', 'semiminor_axis_sigma', 'orientation'
    ]
    catalog = photutils.properties_table(source_props, columns=columns)

    # convert orientation to degrees
    catalog = QTable(catalog)
    orient_deg = catalog['orientation'].to(u.deg)
    catalog.replace_column('orientation', orient_deg)

    # define orientation position angle
    rot = _get_rotation(wcs)
    catalog['orientation_sky'] = ((270. - rot + catalog['orientation'].value) *
                                  u.deg)

    # define AB mag and AB mag error
    pixelarea = model.meta.photometry.pixelarea_arcsecsq
    if pixelarea is None:
        micro_Jy = 0.0
    else:
        micro_Jy = (catalog['source_sum'] *
                    model.meta.photometry.conversion_microjanskys *
                    model.meta.photometry.pixelarea_arcsecsq)
    # micro_Jy is a per-source array; an `if` on it would raise an
    # ambiguous-truth-value error, so compute abmag element-wise instead
    with np.errstate(invalid='ignore', divide='ignore'):
        abmag = np.where(micro_Jy > 0.0, -2.5 * np.log10(micro_Jy) + 23.9, 0.)
    catalog['abmag'] = abmag

    # assuming SNR >> 1 (otherwise abmag_error is asymmetric):
    catalog['abmag_error'] = (2.5 * np.log10(np.e) *
                              catalog['source_sum_err'] /
                              catalog['source_sum'])

    return catalog
Example #11
    def simple_photometry(self):
        """
        Takes a list of FPC exposure images, makes a folder to hold the
        segmentation results, and returns a dictionary of results from the
        segmentation photometry.

        INPUT: list of exposure IDs that identify the FPC images
        OUTPUT: python dictionary
            KEY: exposure ID
            ITEM: information about the photometry
              - path: image file name
              - area: area of the detected segment in pixels
              - sum: sum of flux in pixels
              - exptime: from FITS header (requested exposure time)
              - background: mean background of image
              - x: x_centroid pixel value
              - y: y_centroid pixel value
              - flux: sum / exptime
        """
        self.list_of_files()
        for i, values in self.d.items():
            path_file_abs = values['path']

            #Get info from fits header
            exptime = fits.open(path_file_abs)[0].header['EXPREQ']
            date = fits.open(path_file_abs)[0].header['DATE-OBS']
            time = fits.open(path_file_abs)[0].header['MJD-OBS']
            try:
                delta_ra = fits.open(path_file_abs)[0].header['DELTARA']
                delta_dec = fits.open(path_file_abs)[0].header['DELTADEC']
            except KeyError:
                delta_ra = None
                delta_dec = None
            self.d[i]['exptime'] = exptime
            self.d[i]['delta_ra'] = delta_ra
            self.d[i]['delta_dec'] = delta_dec
            self.d[i]['date'] = date
            self.d[i]['time'] = time


            #Do photometry
            [segm, props_list_return] = segmentation_photometry(path_file_abs,
                                                bkg_sigma = 3.0,
                                                source_snr = 3.0,
                                                fwhm_kernel = 2.0,
                                                x_size_kernel = 3,
                                                y_size_kernel = 3,
                                                clobber = False)

            #Identify individual fibers
            # there may be more than 3 sources returned, depending on parameters
            # for each of the fibres, look for exactly one match in sources

            fibers_found = []
            if self.match_type == 'location':
                for fiber_num, location in self.fiber_ids.items():
                    for props in props_list_return:
                        # check distance to known fibre position
                        # props.centroid is in (y, x) format
                        dist = np.linalg.norm(
                                np.fliplr([props.centroid.value])[0]
                                - location)
                        if dist < self.DIST_THRESHOLD:
                            fibers_found.append(fiber_num)
                            print("Fiber %d was identified" % fiber_num)
                            self.d[i][fiber_num] = {}
                            self.d[i][fiber_num]['seg_object'] = props
                            # remove the props of identified source from props list
                            #props_list_return.remove(props)
                            # stop searching for current fibre
                            #break
                for fiber_num in fibers_found:
                    props = self.d[i][fiber_num]['seg_object']
                    data = np.array(properties_table(
                        props, columns=('area', 'source_sum',
                                        'background_mean', 'centroid',
                                        'max_value')))

                    self.d[i][fiber_num]['area'] = data['area']
                    self.d[i][fiber_num]['sum'] = data['source_sum']
                    self.d[i][fiber_num]['background'] = data['background_mean']
                    self.d[i][fiber_num]['centroid'] = data['centroid']
                    self.d[i][fiber_num]['flux'] = data['source_sum']/exptime
                    self.d[i][fiber_num]['max_value'] = data['max_value']

                    #else:
                    #    # this is not the fibre propwe are looking for
                    #    print('Cannot match fiber %d' % fiber_num)


            elif self.match_type == 'max':
                if self.max_fiber_num == 'all':
                    print("Can only run max for single fibers")
                else:
                    try:
                        max_fiber = int(self.max_fiber_num)
                        self.d[i][max_fiber] = {}
                    except ValueError:
                        print("fiber number has to be 3001, 3002, 3003")
                try:
                    all_data = np.array(properties_table(props_list_return,
                                                         columns=('area',)))
                    area = all_data['area']
                    x = np.where(area == max(area))
                    if len(x[0]) > 1:
                        print("Couldn't find a maximum")
                    elif len(x[0]) == 1:
                        prop = props_list_return[int(x[0][0])]
                        self.d[i][max_fiber]['seg_object'] = prop
                        #Put info in dictionary
                        props = self.d[i][max_fiber]['seg_object']
                        data = np.array(properties_table(
                            props, columns=('area', 'source_sum',
                                            'background_mean', 'centroid',
                                            'max_value')))

                        self.d[i][max_fiber]['area'] = data['area']
                        self.d[i][max_fiber]['sum'] = data['source_sum']
                        self.d[i][max_fiber]['background'] = data['background_mean']
                        self.d[i][max_fiber]['centroid'] = data['centroid']
                        self.d[i][max_fiber]['flux'] = data['source_sum']/exptime
                        self.d[i][max_fiber]['max_value'] = data['max_value']
                except Exception:
                    print("no area to speak of")
Example #12
def segmentation_photometry(path_file_abs,
                            bkg_sigma=3.0,
                            source_snr=3.0,
                            fwhm_kernel=2.0,
                            x_size_kernel=3,
                            y_size_kernel=3,
                            clobber=False):
    """
    aperture photometry from source segmentation
    make_source_mask not yet available in photutils v0.2.2, this version
    manually creates a source mask for determining background

    """

    import os
    import copy
    import glob
    import pickle
    import numpy as np
    from scipy import ndimage
    import matplotlib
    #matplotlib.rcParams['text.usetex'] = True
    #matplotlib.rcParams['text.latex.unicode'] = True
    #from matplotlib.backends.backend_pdf import PdfPages
    import matplotlib.pyplot as plt
    from astropy.io import fits, ascii
    from astropy.convolution import Gaussian2DKernel
    from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma
    #    from astropy.table import Table
    from astropy.visualization import (LogStretch, mpl_normalize)
    #    from astropy.extern.six.moves import StringIO
    from photutils import (detect_threshold, EllipticalAperture,
                           source_properties, properties_table)
    from photutils.detection import detect_sources
    from photutils.utils import random_cmap

    # create preliminary mask
    #from photutils import make_source_mask
    #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

    #        if LEDoff was used, get threshold from LEDoff/background
    #        path_dataset = os.path.dirname(path_file_abs) + os.path.sep
    #        filenameCombined = '\t'.join(
    #            os.listdir(os.path.join(datasetDirLocal, 'master')))
    #        if 'master_ledoff_subtracted' in filename:
    #            print('Using master_ledoff')
    #            # path_file_abs = os.path.join(datasetDir, 'master', filename)
    #            hdu = fits.open(path_file_abs)[0]
    #            data_subtracted = hdu.data
    #            # calculate threadhold
    #            ledoff_pred = np.mean(data_subtracted) * \
    #                np.ones(data_subtracted.shape)
    #            mse = mean_squared_error(data_subtracted, ledoff_pred)
    #            rmse = np.sqrt(mse)
    #            threshold = 7.0 * rmse
    #            threshold_value = threshold
    #         if no LEDoff was used, background subtraction is needed
    #         there should exist no file named "subtracted"
    #         if 'master.fit' in filenameCombined \
    #             or 'master_normalised.fit' in filenameCombined:

    #filenamedir = os.path.basename(path_file_abs)
    #print(filenamedir)
    #New stuff made by Parker
    f_dir, filename = os.path.split(path_file_abs)
    ff = os.path.splitext(filename)[0]
    new_dir = f_dir + '/' + ff

    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    dir_save = new_dir
    print("The photometry files will be saved in ", dir_save)
    filenames_combined = '\t'.join(os.listdir(dir_save))

    if clobber == False \
        and 'segm.obj' in filenames_combined  \
        and 'props.obj' in filenames_combined \
        and 'props.csv' in filenames_combined\
        and 'props.ecsv' in filenames_combined:
        print('Photometry properties table already exists. Reading objects...')
        segm = pickle.load(
            open(glob.glob(os.path.join(dir_save, '*segm.obj*'))[0], 'rb'))
        props = pickle.load(
            open(glob.glob(os.path.join(dir_save, '*props.obj*'))[0], 'rb'))

        return [segm, props]

    if 'master' in path_file_abs:
        if 'normalised' in path_file_abs:
            print('Performing photometry to ' +
                  'normalised master object image {}...'.format(path_file_abs))
        else:
            print('Performing photometry to ' +
                  'un-normalised master image {}...'.format(path_file_abs))
    else:
        print('Warning: Photometry being performed to ' +
              'a single exposure {}...'.format(path_file_abs))

    hdu = fits.open(path_file_abs)[0]
    data = hdu.data
    header = hdu.header

    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        print('Exposure time not found in header. Cannot determine magnitude.')
        exptime = np.nan

    # === Iteratively determine background level ===

    # assuming the background is homogeneous, estimate it by sigma clipping
    # if background noise varies across image, generate 2D background instead
    print('Determining background noise level...')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=5)
    threshold = median + (std * 2.0)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma clipping stats of background, without sources that are masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(data,
                                                          sigma=bkg_sigma,
                                                          mask=mask_dilated,
                                                          iters=3)

    # === Detect sources by segmentation ===

    print('Determining threshold for source detection...')
    # determine threshold for source detection
    # in current implementation, if all inputs are present, the formula is
    # threshold = background + (background_error * snr)
    threshold = detect_threshold(data,
                                 background=bkg_median,
                                 error=bkg_std,
                                 snr=source_snr)
    print('Preparing 2D Gaussian kernel...')
    sigma_kernel = fwhm_kernel * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_kernel,
                              x_size=x_size_kernel,
                              y_size=y_size_kernel)
    # normalise kernel
    # Kernel models are normalized by default (the integral of f(x) dx over
    # (-inf, inf) equals 1), but because of the limited kernel array size,
    # the normalization for kernels with an infinite response can differ
    # from one.
    kernel.normalize()
    # obtain a  SegmentationImage object with the same shape as the data,
    # where sources are labeled by different positive integer values.
    # A value of zero is always reserved for the background.
    # if the threshold includes the background level as above, then the image
    # input into detect_sources() should not be background subtracted.
    print('Segmentation processing...')
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    print('Segmentation labels are: ', repr(segm.labels))

    # === Measure regional source properties ===

    # source_properties() assumes that the data have been background-subtracted.
    # Background is the background level that was previously present
    # in the input data.
    # The input background does not get subtracted from the input data,
    # which should already be background-subtracted.
    print('Extracting source properties...')
    props = source_properties(data - bkg_median, segm, background=bkg_median)
    # add flux and instrumental magnitude to properties
    # flux = source_sum / exptime
    # instrumental magnitude = -2.5 * log10(flux)
    for i in range(len(props)):
        # source_sum is by definition background-subtracted already
        props[i].flux = props[i].source_sum / exptime
        props[i].mag_instr = -2.5 * np.log10(props[i].flux)
    # make plots and save to images
    # define approximate isophotal ellipses for each object
    apertures = []
    r = 2.8  # approximate isophotal extent
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))

    # create a table of properties
    try:
        props_table = properties_table(props)
    except Exception:
        print('No source detected in {}'.format(path_file_abs))
        return [None, None]

    # add custom columns to the table: flux and mag_instr
    props_table['flux'] = [props[i].flux for i in range(len(props))]
    props_table['mag_instr'] = [props[i].mag_instr for i in range(len(props))]

    # plot centroid and segmentation using approximate elliptical apertures
    norm = mpl_normalize.ImageNormalize(stretch=LogStretch())
    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    #[fig1, (ax1, ax2)] = plt.subplots(1, 2, figsize = (12, 6))
    #ax1.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm)
    #ax1.plot(
    #        props_table['xcentroid'], props_table['ycentroid'],
    #        ls='none', color='blue', marker='+', ms=10, lw=1.5)
    #ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    #for aperture in apertures:
    #    aperture.plot(ax=ax1, lw=1.0, alpha=1.0, color='red')
    #    aperture.plot(ax=ax2, lw=1.0, alpha=1.0, color='red')
    # plot using actual segmentation outlines (to be improved)
    #[fig2, ax3] = plt.subplots(figsize = (6, 6))
    #ax3.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm)
    #segm_outline = np.array(segm.outline_segments(), dtype=float)
    #segm_outline[segm_outline<1] = np.nan
    # get a copy of the gray color map
    #segm_outline_cmap = copy.copy(plt.cm.get_cmap('autumn'))
    # set how the colormap handles 'bad' values
    #segm_outline_cmap.set_bad(alpha=0)
    #ax3.imshow(segm_outline, origin='lower', cmap=segm_outline_cmap)

    # === save ===
    # Save segm, props to object files, and also save props to a table file.
    print('Saving segmentation and source properties to {}...'.format(
        dir_save))
    try:
        # if the filename ends with .fits, strip the extension
        if filename[-5:] == '.fits':
            dir_save_prefix = os.path.join(dir_save, filename[0:-5])
        else:
            dir_save_prefix = os.path.join(dir_save, filename)
        # Enhanced CSV allows preserving table meta-data such as
        # column data types and units.
        # In this way a data table can be stored and read back as ASCII
        # with no loss of information.
        ascii.write(props_table,
                    dir_save_prefix + '-phot_props.ecsv',
                    format='ecsv')
        # csv for readability in MS excel
        ascii.write(props_table,
                    dir_save_prefix + '-phot_props.csv',
                    format='csv')

        # dump segmentation and properties to object files in binary mode
        file_segm = open(dir_save_prefix + '-phot_segm.obj', 'wb')
        pickle.dump(segm, file_segm)
        file_props = open(dir_save_prefix + '-phot_props.obj', 'wb')
        pickle.dump(props, file_props)

        # save figures
        #fig1.savefig(dir_save_prefix + '-phot_segm_fig1.png', dpi=600)
        #pp1 = PdfPages(dir_save_prefix + '-phot_segm_fig1.pdf')
        #pp1.savefig(fig1)
        #pp1.close()

        #fig2.savefig(dir_save_prefix + '-phot_segm_fig2.png', dpi=600)
        #pp2 = PdfPages(dir_save_prefix + '-phot_segm_fig2.pdf')
        #pp2.savefig(fig2)
        #pp2.close()

        print('Segmentation, properties objects, tables, and images saved to',
              dir_save)
    except Exception:
        print('Unable to write to disk, check permissions.')

    return [segm, props]
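
A hedged usage sketch for the function above; the file path is hypothetical, and the keyword values mirror the defaults used by the callers in Examples 9 and 11:

# hypothetical input path; any FITS exposure with EXPREQ/EXPTIME in its header
segm, props = segmentation_photometry('/data/fpc/PROTODESI_FPC_00000123.fits',
                                      bkg_sigma=3.0,
                                      source_snr=3.0,
                                      fwhm_kernel=2.0,
                                      x_size_kernel=3,
                                      y_size_kernel=3,
                                      clobber=False)
if props is not None:
    for prop in props:
        print(prop.id, prop.flux, prop.mag_instr)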
Example #13
def segmentation_photometry(path_image_abs,
                            path_error_abs = None,
                            logger = None,
                            bkg_sigma = 1.5,
                            source_snr = 1.05,
                            fwhm_kernel = 25,
                            x_size_kernel = 100,
                            y_size_kernel = 80,
                            dump_pickle = False,
                            clobber = True):

    """

    given a fits file (master image), this function calculates
    photometry by source segmentation.

    make_source_mask not yet available in photutils v0.2.2, this version
    manually creates a source mask for determining background.

    """
    
    def msg(string, msgtype = None):

        print(string)
        if logger is not None:
            if msgtype == 'info':
                logger.info(string)
            elif msgtype == 'error':
                logger.error(string)
            elif msgtype == 'warning':
                logger.warning(string)

    filename = os.path.basename(path_image_abs)
    dir_save = os.path.dirname(path_image_abs)
    filenames_combined = '\t'.join(os.listdir(dir_save))

    if clobber == False \
        and filename[0:-5]+'-segm.obj' in filenames_combined  \
        and filename[0:-5]+'-props.obj' in filenames_combined \
        and filename[0:-5]+'-centroid_outline.png' in filenames_combined \
        and filename[0:-5]+'-centroid_outline.pdf' in filenames_combined \
        and filename[0:-5]+'-segmentation.png' in filenames_combined \
        and filename[0:-5]+'-segmentation.pdf' in filenames_combined:
        msg('Photometry properties table already exists. '
            + 'Reading pickles...', 
            msgtype='info')

        try:
            segm = pickle.load(open(glob.glob(os.path.join(
                                dir_save, filename[0:-5]+'-segm.obj*'))[0],
                                'rb'))
            props_list = pickle.load(open(glob.glob(os.path.join(
                                dir_save, filename[0:-5]+'-props.obj*'))[0],
                                'rb'))
            return [segm, props_list]
        except Exception:
            # pickle file corrupt or empty, proceed
            pass
    elif clobber == False \
        and filename[0:-5]+'-logstretch.png' in filenames_combined \
        and filename[0:-5]+'-logstretch.pdf' in filenames_combined:
        msg('Non-detection from previous results.',
            msgtype='info')
        return [None, []]
        
    # image type notifications
    if 'master' in path_image_abs:
        if 'normalised' in path_image_abs:
            msg('Performing photometry to '
                + 'normalised master object image {}...'.format(path_image_abs),
                msgtype='info')
        else:
            msg('Performing photometry to '
                + 'un-normalised master image {}...'.format(path_image_abs),
                msgtype='info')
    elif 'reduced' in path_image_abs:
        msg('Performing photometry to '
                + 'reduced image frame {}...'.format(path_image_abs),
                msgtype='info')
    else:
        msg('Warning: Photometry being performed to '
            + 'a single exposure {}...'.format(path_image_abs),
            msgtype='warning')
    
    # read in data
    try:
        hdu = fits.open(path_image_abs)['FPC']
        data = hdu.data
    except KeyError:
        # no FPC extension; fall back to the primary HDU
        hdu = fits.open(path_image_abs)[0]
        data = hdu.data
    # read in error in data
    msg('Reading master error image {}...'
        .format(path_error_abs))
    try:
        hdu_error = fits.open(path_error_abs)[0]
        data_error = hdu_error.data
    except Exception:
        data_error = np.zeros(data.shape)
        msg('No master error image available for {}'
            .format(path_image_abs))

    header = hdu.header

    if 'EXPREQ' in header:
        exptime = header['EXPREQ']
    elif 'EXPTIME' in header:
        exptime = header['EXPTIME']
    else:
        msg('Exposure time not found in header. '
            + 'Cannot determine magnitude.',
            msgtype='error')
        exptime = np.nan

    # === Iteratively determine background level ===

    # assuming the background is homogeneous, estimate background by sigma clipping
    # if background noise varies across image, generate 2D background instead
    # using the Background function
    msg('Determining background noise level...',
        msgtype='info')
    [mean, median, std] = sigma_clipped_stats(data, sigma=bkg_sigma, iters=3)
    threshold = median + (std * 4)
    segm = detect_sources(data, threshold, npixels=5)
    # turn segm into a mask
    mask = segm.data.astype(bool)
    # dilate the source mask to ensure complete masking of detected sources
    dilate_structure = np.ones((5, 5))
    mask_dilated = ndimage.binary_dilation(mask, structure=dilate_structure)
    # get sigma clipping stats of background, without sources that are masked
    [bkg_mean, bkg_median, bkg_std] = sigma_clipped_stats(
        data, sigma=bkg_sigma, mask=mask_dilated, iters = 3)

    # === Detect sources by segmentation ===
    msg('Determining threshold for source detection...',
        msgtype='info')
    # determine threshold for source detection
    # in current implementation, if all inputs are present, the formula is
    # threshold = background + (background_error * snr)
    threshold = detect_threshold(data,
                                 background = bkg_median,
                                 error = data_error+bkg_std,
                                 snr = source_snr)
    # calculate total error including poisson statistics
    try:
        # this is for v0.3 and above
        msg('Calculating total errors including background and Poisson...',
            msgtype='info')
        err_tot = calc_total_error(data, 
                                   bkg_error= data_error+bkg_std, 
                                   effective_gain=0.37)
        gain = None
    # in versions earlier than 0.3, this function is not available
    except Exception:
        # error must be of the same shape as the data array
        # this is for v0.2.2
        err_tot = data_error + bkg_std
        gain = 0.37
    msg('Preparing 2D Gaussian kernel...',
        msgtype='info')
    sigma_kernel = fwhm_kernel * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma_kernel,
                              x_size = x_size_kernel,
                              y_size = y_size_kernel)
    # normalise kernel
    # Kernel models are normalized by default (the integral of f(x) dx over
    # (-inf, inf) equals 1), but because of the limited kernel array size,
    # the normalization for kernels with an infinite response can differ
    # from one.
    kernel.normalize()
    # obtain a  SegmentationImage object with the same shape as the data,
    # where sources are labeled by different positive integer values.
    # A value of zero is always reserved for the background.
    # if the threshold includes the background level as above, then the image
    # input into detect_sources() should not be background subtracted.
    msg('Segmentation processing...',
        msgtype='info')
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)
    msg('Segmentation labels are: ' + repr(segm.labels),
        msgtype='info')

    # === Measure regional source properties ===

    # source_properties() assumes that the data have been background-subtracted.
    # Background is the background level that was previously present
    # in the input data.
    # The input background does not get subtracted from the input data,
    # which should already be background-subtracted.
    msg('Extracting source properties...',
        msgtype='info')
    if gain is None:
        # gain is no longer supported in v0.3 and included in total error array
        props_list = source_properties(data-bkg_median, segm, 
                               background = bkg_median,
                               error = err_tot)
    else: # still in v0.2.2
        props_list = source_properties(data-bkg_median, segm, 
                                       background = bkg_median,
                                       error = err_tot,
                                       effective_gain = gain)
    # add more properties that are not automatically calculated
    for i in range(len(props_list)):
        # source_sum is by definition background-subtracted already
        props_list[i].flux = props_list[i].source_sum/exptime
        props_list[i].flux_err = props_list[i].source_sum_err/exptime
        # flux = source_sum / exptime
        # instrumental magnitude = -2.5 * log10(flux)
        props_list[i].mag_instr = -2.5 * np.log10(props_list[i].flux)
        # error propagation: sigma_m = (2.5 / ln 10) * sigma_F / F (non-negative)
        props_list[i].mag_instr_err = 2.5 / props_list[i].flux / np.log(10) \
                                        * props_list[i].flux_err
        # assuming the FWHM of a circular Gaussian with the same cross-section area
        props_list[i].fwhm = gaussian_sigma_to_fwhm * np.sqrt(
                            props_list[i].semimajor_axis_sigma.value
                            * props_list[i].semiminor_axis_sigma.value)
    # make plots and save to images
    # define approximate isophotal ellipses for each object
    apertures = []
    r = 5 # approximate isophotal extent
    for props in props_list:
        position = (props.xcentroid.value, props.ycentroid.value)
        a = props.semimajor_axis_sigma.value * r
        b = props.semiminor_axis_sigma.value * r
        theta = props.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))
        
    # === plot and save ===
        
    # if filename ends with fits, remove it in the  filename
    if filename[-5:] == '.fits':
        path_save_prefix = os.path.join(dir_save, filename[0:-5])
    else:
        path_save_prefix = os.path.join(dir_save, filename)
        
    norm_log = mpl_normalize.ImageNormalize(vmin=0, vmax=2000,
                                            stretch = LogStretch())

    if len(props_list) > 0:
        
        # Save segm, props to object files, and also save props to a table file.
        msg('Saving segmentation and source properties to {}...'
            .format(dir_save),
            msgtype='info')
        
        # at least one source was detected
        # create a table of properties
        props_table = properties_table(props_list)
        # add custom columns to the table: mag_instru and flux
        props_table['flux'] = [props_list[i].flux
                                for i in range(len(props_list))]
        props_table['flux_err'] = [props_list[i].flux_err
                                for i in range(len(props_list))]
        props_table['mag_instr'] = [props_list[i].mag_instr
                                        for i in range(len(props_list))]
        props_table['mag_instr_err'] = [props_list[i].mag_instr_err
                                        for i in range(len(props_list))]
        props_table['fwhm'] = [props_list[i].fwhm
                                for i in range(len(props_list))]

        # plot centroid and segmentation outline
        [fig1, ax1] = plt.subplots(figsize=(4, 3))
        ax1.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
        ax1.plot(props_table['xcentroid'], props_table['ycentroid'],
                 linestyle='none', color='red',
                 marker='+', markersize=2, markeredgewidth=0.1, alpha=1)
        segm_outline = np.array(segm.outline_segments(), dtype=float)
        segm_outline[segm_outline<1] = np.nan
        # use the 'winter' colormap for the segment outlines
        segm_outline_cmap = plt.cm.winter
        # set how the colormap handles 'bad' (NaN) values
        segm_outline_cmap.set_bad(alpha=0)
        ax1.imshow(segm_outline, 
                   origin='lower', cmap=segm_outline_cmap, alpha=1)
        ax1.get_xaxis().set_visible(False)
        ax1.get_yaxis().set_visible(False)
        fig1.tight_layout()
    
        # segmentation image and aperture using approximate elliptical apertures
        [fig2, ax2] = plt.subplots(figsize=(4, 3))
        rand_cmap = random_cmap(segm.max + 1, random_state=8)
        ax2.imshow(segm, origin='lower', cmap=rand_cmap)
        ax2.plot(props_table['xcentroid'], props_table['ycentroid'],
                 linestyle='none', color='red',
                 marker='+', markersize=2, markeredgewidth=0.1, alpha=1)
        for aperture in apertures:
            aperture.plot(ax=ax2, lw=0.1, alpha=1, color='lime')
        ax2.axis('off')
        ax2.get_xaxis().set_visible(False)
        ax2.get_yaxis().set_visible(False)
        fig2.tight_layout()
        
        try:
            # Enhanced CSV allows preserving table meta-data such as
            # column data types and units.
            # In this way a data table can be stored and read back as ASCII
            # with no loss of information.
            ascii.write(props_table, path_save_prefix + '-props.ecsv',
                        format = 'ecsv')
            # csv for readability in MS Excel
            ascii.write(props_table, path_save_prefix + '-props.csv',
                        format = 'csv')
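            # the ECSV round-trips losslessly, e.g. to read it back later:
            #   tbl = ascii.read(path_save_prefix + '-props.ecsv',
            #                    format='ecsv')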
    
            # save figures
            fig1.savefig(path_save_prefix + '-centroid_outline.png',
                         bbox_inches='tight', pad_inches=0, dpi=1200)
            fig2.savefig(path_save_prefix + '-segmentation.png',
                         bbox_inches='tight', pad_inches=0, dpi=2000)
                         
            pp1 = PdfPages(path_save_prefix + '-centroid_outline.pdf')
            pp1.savefig(fig1, dpi=1200)
            pp1.close()
    
            pp2 = PdfPages(path_save_prefix + '-segmentation.pdf')
            pp2.savefig(fig2, dpi=2000)
            pp2.close()

            if dump_pickle:
                # dump segmentation and properties objects in binary mode,
                # closing each file handle after writing
                file_segm = open(path_save_prefix + '-segm.obj', 'wb')
                pickle.dump(segm, file_segm)
                file_segm.close()
                file_props = open(path_save_prefix + '-props.obj', 'wb')
                pickle.dump(props_list, file_props)
                file_props.close()
    
            msg('Segmentation, properties objects, tables, and images '
                + 'saved to {}'.format(dir_save),
                msgtype='info')
                
        except OSError:
            msg('Unable to write to disk, check permissions.',
                msgtype='error')
            
        # release figures and large intermediates to avoid leaking memory
        # across repeated calls
        try:
            plt.close('all')
            del (hdu, hdu_error, data, data_error, header, mask, mask_dilated, 
                 err_tot, kernel, apertures, norm_log, props_table, 
                 segm_outline, segm_outline_cmap, rand_cmap, 
                 fig1, ax1, fig2, ax2, pp1, pp2, file_segm, file_props)
        except NameError:
            pass
        return [segm, props_list]

    else:
        msg('No source detected in {}'.format(path_image_abs),
            msgtype='warning')
            
        # save a log-stretched image when no source was detected
        [fig0, ax0] = plt.subplots(figsize=(4, 3))
        ax0.imshow(data, origin='lower', cmap=plt.cm.gray, norm=norm_log)
        ax0.get_xaxis().set_visible(False)
        ax0.get_yaxis().set_visible(False)
        
        try:
            fig0.savefig(path_save_prefix + '-logstretch.png',
                         bbox_inches='tight', pad_inches=0, dpi=1200)
            pp0 = PdfPages(path_save_prefix + '-logstretch.pdf')
            pp0.savefig(fig0, dpi=1200)
            pp0.close()
            
        except OSError:
            msg('Unable to write to disk, check permissions.',
                msgtype='error')
            
        return [None, []]
Example No. 14
    def aperture_photometry(self, filename):

        # aperture photometry from source segmentation

        # determine threshold for background detection
        # if LEDoff was used, get threshold from LEDoff/background
        filepath = os.path.join(datasetDirLocal, 'master', filename)
        filenameCombined = '\t'.join(
            os.listdir(os.path.join(datasetDirLocal, 'master')))
        if 'master_ledoff_subtracted' in filename:
            self.msg('Using master_ledoff')
            # filepath = os.path.join(datasetDir, 'master', filename)
            hdu = fits.open(filepath)[0]
            data_subtracted = hdu.data
            # calculate threshold
            ledoff_pred = np.mean(data_subtracted) * np.ones(
                data_subtracted.shape)
            mse = mean_squared_error(data_subtracted, ledoff_pred)
            rmse = np.sqrt(mse)
            threshold = 7.0 * rmse
            threshold_value = threshold
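            # the RMSE against a constant mean model is just the standard
            # deviation of the background frame, so this is a 7-sigma
            # detection threshold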

        # if no LEDoff was used, background subtraction is needed
        # there should exist no file named "subtracted"
        elif 'master.fit' in filenameCombined \
             or 'master_normalised.fit' in filenameCombined:
            self.ui.statusbar.showMessage('Using master or master_normalised')

            # create preliminary mask
            """ make_source_mask not yet available in photutils v0.2.1
                wait for v0.3 release
            """
            #from photutils import make_source_mask
            #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

            # background subtraction
            """ create 2D image of background and background rms and 
                apply sigma-clipping to each region in the low-res  
                background map to get mean, median, and std/rms. 
                sigma-clipping is the most widely used method though not as 
                good as using mask; still superior to robust standard 
                deviation using median absolute deviation (MAD-STD)
            """

            hdu = fits.open(filepath)[0]
            data = hdu.data
            if 'EXPTIME' in hdu.header:
                exptime = hdu.header['EXPTIME']
            else:
                exptime = hdu.header['EXPREQ']

            self.msg('Determining threshold for target detection...')
            # calculate threshold
            # [mean, median, std] = sigma_clipped_stats(master, sigma=3.0, iters=5)
            bkg = Background(data, (100, 100),
                             filter_shape=(3, 3),
                             method='median')
            # bkg = Background(master, (50, 50), filter_size=(3, 3), method='median')
            # plt.imshow(bkg.background, norm=normalisation, origin='lower', cmap=plt.cm.gray)
            plt.imshow(bkg.background, origin='lower', cmap=plt.cm.gray)
            [fig, ax] = plt.subplots(figsize=(8, 8))
            # make background-subtracted image
            data_subtracted = data - bkg.background
            # plot
            plt.imshow(data_subtracted, origin='lower', cmap=plt.cm.gray)

            # save background subtracted image
            if 'master.fit' in filename:
                hdu_subtracted = fits.PrimaryHDU(data_subtracted)
                hdu_subtracted.writeto('master_subtracted.fits', clobber=True)
            elif 'master_normalised.fit' in filename:
                hdu_normalised_subtracted = fits.PrimaryHDU(data_subtracted)
                hdu_normalised_subtracted.writeto(
                    'master_normalised_subtracted.fits', clobber=True)

            # segmentation at a given sigma level, for regional properties
            threshold = 5.0 * bkg.background_rms  # since data is background-subtracted
            threshold_value = threshold.flat[0]
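            # threshold is a per-pixel 2D array here (detect_sources
            # accepts either a scalar or an image-shaped threshold);
            # .flat[0] just reports one representative value for logging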

        self.msg('Threshold for target detection is: ' + repr(threshold_value))
        # perform segmentation whether flat was available or not
        self.msg('Performing segmentation...')
        segm = detect_sources(data_subtracted, threshold, npixels=5)

        self.msg('Segmentation labels are:')
        self.msg((str(segm.labels)))
        # measure regional source properties from segmentation
        # the centroid is from image moments, already intensity-weighted
        self.msg('Measuring source properties')
        if 'bkg' in locals():
            props = source_properties(data_subtracted,
                                      segm,
                                      error=bkg.background_rms,
                                      background=bkg.background)
        elif 'master_ledoff_subtracted' in filenameCombined:
            filepath = os.path.join(datasetDirLocal, 'master',
                                    'master_ledoff_subtracted.fits')
            hdu = fits.open(filepath)[0]
            master_ledoff_subtracted = hdu.data
            props = source_properties(data_subtracted,
                                      segm,
                                      error=master_ledoff_subtracted -
                                      np.mean(master_ledoff_subtracted),
                                      background=master_ledoff_subtracted)

        # instrumental magnitude = -2.5 * log10(flux)
        for i in range(len(props)):
            props[i].mag_instr = -2.5 * np.log10(props[i].source_sum / exptime)
            # source_sum is by definition background-subtracted already
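            # dividing by the exposure time converts counts to a count
            # rate, so magnitudes are comparable across exposures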
        propsTableColumns = [
            'id', 'xcentroid', 'ycentroid', 'area', 'max_value', 'source_sum',
            'mag_instr'
        ]
        # there are other properties available, see list of SourceProperties
        # http://photutils.readthedocs.io/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties

        propsTable = properties_table(props, columns=propsTableColumns)
        self.ui.statusbar.showMessage(repr(propsTable))

        # plot segmentated image
        self.rmmpl()
        segFig = Figure()
        cmapRand = random_cmap(segm.max + 1, random_state=12345)
        axes = segFig.add_subplot(111)
        axes.imshow(segm, origin='lower', cmap=cmapRand)
        axes.plot(propsTable['xcentroid'],
                  propsTable['ycentroid'],
                  ls='none',
                  color='red',
                  marker='+',
                  ms=10,
                  lw=1.5)
        self.addmpl(segFig)

        # set properties table font and font size
        self.ui.tablePhot.setCurrentFont(QtGui.QFont('Courier'))
        self.ui.tablePhot.setFontPointSize(9)
        self.ui.tablePhot.setPlainText(repr(propsTable))

        self.msg('Photometry completed')
Example No. 15
#	seg_flux=np.bincount(np.ravel(seg_data), weights=np.ravel(shared_im))
#	for i in range(seg_nid):
#		cat_data[i]=(i, seg_npix[i], seg_flux[i], 0., 0.)

#	cat_data=cat_data[1:]
    sys.exit()

    cat_properties = segment_properties(shared_im,
                                        seg_data,
                                        background=0.,
                                        mask=im_mask_nan)
    cat_columns = [
        'id', 'xcentroid', 'ycentroid', 'segment_sum', 'area',
        'semimajor_axis_sigma', 'semiminor_axis_sigma', 'elongation'
    ]
    cat_data = properties_table(cat_properties, columns=cat_columns)
    gv_finite = (np.isfinite(cat_data['xcentroid'])
                 & np.isfinite(cat_data['ycentroid']))
    cat_data = cat_data[gv_finite]
    gv_sort = np.argsort(cat_data['area'])[::-1]
    cat_data = cat_data[gv_sort]

    gv_mask = (cat_data['area'] > mask_area_min)

    fig = plt.figure()
    ax = plt.gca()
    ax.plot(np.sqrt(cat_data['area']),
            cat_data['segment_sum'],
            linestyle='',
            marker='o',
            markersize=5,
            c='blue',
            alpha=0.5,
            markeredgecolor='none')
Example No. 16
#	cat_data=np.recarray(seg_nid, dtype={'names':['id','npix','flux','mag','fwhm'], 'formats':['i4','i4','f4','f4','f4']})
#	cat_data.fill(0)

#	print 'Computing the flux and size for the detected sources'
#	seg_npix=np.bincount(np.ravel(seg_data))
#	seg_flux=np.bincount(np.ravel(seg_data), weights=np.ravel(shared_im))
#	for i in range(seg_nid):
#		cat_data[i]=(i, seg_npix[i], seg_flux[i], 0., 0.)

#	cat_data=cat_data[1:]
	sys.exit()

	cat_properties = segment_properties(shared_im, seg_data, background=0., mask=im_mask_nan)
	cat_columns = ['id', 'xcentroid', 'ycentroid', 'segment_sum', 'area', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'elongation']
	cat_data = properties_table(cat_properties, columns=cat_columns)
	gv_finite=(np.isfinite(cat_data['xcentroid']) & np.isfinite(cat_data['ycentroid']))
	cat_data=cat_data[gv_finite]
	gv_sort=np.argsort(cat_data['area'])[::-1]
	cat_data=cat_data[gv_sort]

	gv_mask=(cat_data['area'] > mask_area_min)

	fig = plt.figure()
	ax = plt.gca()
	ax.plot(np.sqrt(cat_data['area']), cat_data['segment_sum'], linestyle='', marker='o', markersize=5, c='blue', alpha=0.5, markeredgecolor='none')
	ax.plot(np.sqrt(cat_data['area'][gv_mask]), cat_data['segment_sum'][gv_mask], linestyle='', marker='o', markersize=5, c='red', alpha=0.5, markeredgecolor='none')
	ax.set_yscale('log')
	ax.set_xlabel('Sqrt(Area) (pix)')
	ax.set_ylabel('Flux (ADU)')
	fig.savefig('plot_flux_area_tile1_g.pdf', format='pdf')
Example No. 17
def prepare_data(file, data_dir, folder):
    """
    prepare_data picks a file with the image of the galaxy, detect the central object, rotate it to the major axis, and returns 
    the data and errors ready to fit a warp curve, along with the maximum distance from the center 

    """

    # check if there is a folder for the figures, if not create it

    if not os.path.isdir('../figs/' + str(folder)):
        os.mkdir('../figs/' + str(folder))

    # check if there is a folder for the text output, if not create it

    if not os.path.isdir('../output/' + str(folder)):
        os.mkdir('../output/' + str(folder))

    print(data_dir + '/' + str(file))

    hdu = fits.open(data_dir + '/' + str(file[:-1]))[0]
    wcs = WCS(hdu.header)

    data = hdu.data

    sigma_clip = SigmaClip(sigma=3., iters=10)
    bkg_estimator = MedianBackground()
    bkg = Background2D(data, (25, 25),
                       filter_size=(3, 3),
                       sigma_clip=sigma_clip,
                       bkg_estimator=bkg_estimator)
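    # Background2D estimates the sky on a grid of 25x25 pixel boxes using
    # sigma-clipped statistics, median-filters the low-resolution map over
    # 3x3 boxes, then interpolates back to the full image resolution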

    if (cfg.DATA_TYPE == 'REAL'):
        weight = fits.open(data_dir + '/' + str(file[:-1]))[1].data
    else:
        weight = bkg.background_rms

    threshold = bkg.background + (3. * bkg.background_rms)

    sigma = 2.0 * gaussian_fwhm_to_sigma  # FWHM = 2.
    kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)
    kernel.normalize()
    segm = detect_sources(data, threshold, npixels=5, filter_kernel=kernel)

    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
    ax1.imshow(data, origin='lower', cmap='Greys_r')
    ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    plt.savefig('../figs/' + str(folder) + '/' + str(file[:-5]) + 'fig2.png')
    plt.close()

    props = source_properties(data, segm)
    tbl = properties_table(props)

    my_min = 100000.

    x_shape = np.float(data.shape[0])
    y_shape = np.float(data.shape[1])

    r = 3.  # approximate isophotal extent
    apertures = []
    for prop in props:
        position = (prop.xcentroid.value, prop.ycentroid.value)
        a = prop.semimajor_axis_sigma.value * r
        b = prop.semiminor_axis_sigma.value * r
        theta = prop.orientation.value
        apertures.append(EllipticalAperture(position, a, b, theta=theta))
        my_dist = np.sqrt((prop.xcentroid.value - x_shape / 2.)**2 +
                          (prop.ycentroid.value - y_shape / 2.)**2)
        if (my_dist < my_min):
            my_label = prop.id - 1
            my_min = my_dist

    mytheta = props[my_label].orientation.value
    mysize = np.int(np.round(r * props[my_label].semimajor_axis_sigma.value))
    my_x = props[my_label].xcentroid.value
    my_y = props[my_label].ycentroid.value

    mask_obj = np.ones(data.shape, dtype='bool')
    mask_obj[(segm.data != 0) * (segm.data != props[my_label].id)] = 0

    # note: boolean indexing yields a 1D array; this value is unused below
    weight_masked = weight[mask_obj]

    rand_cmap = random_cmap(segm.max + 1, random_state=12345)
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
    ax1.imshow(data, origin='lower', cmap='Greys_r')
    ax2.imshow(segm, origin='lower', cmap=rand_cmap)
    for aperture in apertures:
        aperture.plot(color='blue', lw=1.5, alpha=0.5, ax=ax1)
        aperture.plot(color='white', lw=1.5, alpha=1.0, ax=ax2)
    plt.savefig('../figs/' + str(folder) + '/' + str(file[:-5]) + 'fig3.png')
    plt.close()

    # crop a 200x200 pixel window around the center of the rotated image;
    # integer division keeps the slice indices integral under Python 3
    data_rot = rotate(data, np.rad2deg(mytheta))
    cx, cy = data_rot.shape[0] // 2, data_rot.shape[1] // 2
    data_rot = data_rot[cx - 100:cx + 100, cy - 100:cy + 100]

    w_rot = rotate(weight, np.rad2deg(mytheta))
    wx, wy = w_rot.shape[0] // 2, w_rot.shape[1] // 2
    w = w_rot[wx - 100:wx + 100, wy - 100:wy + 100]

    plt.figure()
    plt.imshow(data_rot, origin='lower', cmap='Greys_r')
    plt.savefig('../figs/' + str(folder) + '/' + str(file[:-5]) + 'fig4.png')
    plt.close()

    newx, newy, newtheta, newsize = find_centroid(data_rot)
    print('old center = ', my_x, my_y, mysize)
    print('new center = ', newx, newy, np.rad2deg(newtheta), newsize)

    x_shape2 = np.float(data_rot.shape[0])
    y_shape2 = np.float(data_rot.shape[1])

    np.savetxt(
        '../output/' + str(folder) + '/' + str(file[:-5]) +
        '_size_xcent_ycent_xy_shape.txt',
        np.array([newsize, newx, newy, x_shape2, y_shape2]))

    return data_rot, w, newsize, newx, newy, x_shape2, y_shape2
Example No. 18
                              segm,
                              error=master_ledoff_subtracted -
                              np.mean(master_ledoff_subtracted),
                              background=master_ledoff_subtracted)

# instrumental magnitude = -2.5 * log10(flux)
for i in range(len(props)):
    props[i].mag_instr = -2.5 * np.log10(props[i].source_sum)
propsTableColumns = [
    'id', 'xcentroid', 'ycentroid', 'area', 'max_value', 'source_sum',
    'mag_instr'
]
# there are other properties available, see list of SourceProperties
# http://photutils.readthedocs.io/en/latest/api/photutils.segmentation.SourceProperties.html#photutils.segmentation.SourceProperties

propsTable = properties_table(props, columns=propsTableColumns)
print(propsTable)

#%% plots for visualisation

apertures = []
for prop in props:
    position = (prop.xcentroid.value, prop.ycentroid.value)
    a = prop.semimajor_axis_sigma.value * 3.0
    b = prop.semiminor_axis_sigma.value * 3.0
    theta = prop.orientation.value
    apertures.append(EllipticalAperture(position, a, b, theta=theta))
norm = ImageNormalize(stretch=SqrtStretch())
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(18, 18))

if 'bkg' in locals():
Example No. 19
    def aperture_photometry(self, path_file_abs):
        """
        aperture photometry from source segmentation
        make_source_mask not yet available in photutils v0.2.1
        wait for v0.3 release

        aperture_photometry() assumes that the data have been
        background-subtracted.

        """

        # create preliminary mask
        #from photutils import make_source_mask
        #masterMask = make_source_mask(master, snr=2, npixels=5, dilate_size=11)

        #        if LEDoff was used, get threshold from LEDoff/background
        #        path_dataset = os.path.dirname(path_file_abs) + os.path.sep
        #        filenameCombined = '\t'.join(
        #            os.listdir(os.path.join(datasetDirLocal, 'master')))
        #        if 'master_ledoff_subtracted' in filename:
        #            self.msg('Using master_ledoff')
        #            # path_file_abs = os.path.join(datasetDir, 'master', filename)
        #            hdu = fits.open(path_file_abs)[0]
        #            data_subtracted = hdu.data
        #            # calculate threshold
        #            ledoff_pred = np.mean(data_subtracted) * \
        #                np.ones(data_subtracted.shape)
        #            mse = mean_squared_error(data_subtracted, ledoff_pred)
        #            rmse = np.sqrt(mse)
        #            threshold = 7.0 * rmse
        #            threshold_value = threshold
        #         if no LEDoff was used, background subtraction is needed
        #         there should exist no file named "subtracted"
        #         if 'master.fit' in filenameCombined \
        #             or 'master_normalised.fit' in filenameCombined:

        if 'master.fit' in path_file_abs:
            self.msg('Photometry using un-normalised master image')
        elif 'master_normalised.fit' in path_file_abs:
            self.msg('Photometry using normalised master image')

        hdu = fits.open(path_file_abs)[0]
        data = hdu.data

        if 'EXPTIME' in hdu.header:
            exptime = hdu.header['EXPTIME']
        else:
            exptime = hdu.header['EXPREQ']

        # === background subtraction ===
        """
        if no LEDoff was used, background subtraction is needed.
        there should exist no file named "subtracted".

        create 2D image of background and background rms and
        apply sigma-clipping to each region in the low-res
        background map to get mean, median, and std/rms.
        sigma-clipping is the most widely used method though not as
        good as using mask; still superior to robust standard
        deviation using median absolute deviation (MAD-STD).

        """

        # create background
        # [mean, median, std] = sigma_clipped_stats(master, sigma=3.0, iters=5)
        # bkg = Background(master, (50, 50), filter_size=(3, 3), method='median')
        bkg = Background(data, (100, 100),
                         filter_shape=(3, 3),
                         method='median')

        # plot background image
        # plt.imshow(bkg.background, norm=normalisation, origin='lower', cmap=plt.cm.gray)
        plt.imshow(bkg.background, origin='lower', cmap=plt.cm.gray)
        [fig, ax] = plt.subplots(figsize=(8, 8))

        # make background-subtracted image
        data_subtracted = data - bkg.background

        # plot subtracted image
        plt.imshow(data_subtracted, origin='lower', cmap=plt.cm.gray)

        # === segmentation at a given sigma level ===
        # perform segmentation whether flat is available or not

        self.msg('Determining threshold for target detection...')
        # because data is background-subtracted
        threshold_array = 5.0 * bkg.background_rms
        # print out threshold value
        threshold_value = threshold_array.flat[0]
        self.msg('Threshold for target detection is: ' + repr(threshold_value))

        self.msg('Detecting sources and performing segmentation...')
        segm = detect_sources(data_subtracted, threshold_array, npixels=5)

        self.msg('Segmentation labels are:')
        self.msg((repr(segm.labels)))

        # === regional properties ===
        # measure regional source properties from segmentation
        # the centroid is from image moments, already intensity-weighted

        self.msg('Measuring source properties...')
        if 'bkg' in locals():
            # use the background determined from master_subtracted
            props = source_properties(data_subtracted,
                                      segm,
                                      error=bkg.background_rms,
                                      background=bkg.background)

#        elif 'master_ledoff_subtracted' in filenameCombined:
#            path_file_abs = os.path.join(
#                datasetDirLocal, 'master', 'master_ledoff_subtracted.fits')
#            hdu = fits.open(path_file_abs)[0]
#            master_ledoff_subtracted = hdu.data
#            props = source_properties(data_subtracted, segm,
#                error = master_ledoff_subtracted \
#                        - np.mean(master_ledoff_subtracted),
#                background = master_ledoff_subtracted)

        # add instrumental magnitude to properties
        # instrumental magnitude = -2.5 * log10(flux)
        for i in range(len(props)):
            # source_sum is by definition background-subtracted already
            props[i].mag_instr = -2.5 * np.log10(props[i].source_sum / exptime)

        # create table from props object
        # there are other properties available, see list of SourceProperties:
        # http://goo.gl/rkfQ9V

        props_table_columns = [
            'id', 'xcentroid', 'ycentroid', 'area', 'max_value', 'source_sum',
            'mag_instr'
        ]
        props_table_display = properties_table(props,
                                               columns=props_table_columns)
        props_table_save = properties_table(props)
        #        self.msg(repr(props_table_display))
        print(repr(props_table_display))

        # check and create analysis folder if it doesn't exist
        path_dataset = os.path.dirname(path_file_abs)
        path_analysis = path_dataset.replace('/data/images/fpc/',
                                             '/data/images/fpc_analysis/')
        if not os.path.exists(path_analysis):
            os.makedirs(path_analysis)

        # save background subtracted image
        if 'master.fit' in path_file_abs:
            path_save = os.path.join(path_dataset, 'master_subtracted.fits')
        elif 'master_object.fit' in path_file_abs:
            path_save = os.path.join(path_dataset,
                                     'master_object_subtracted.fits')
        elif 'master_normalised.fit' in path_file_abs:
            path_save = os.path.join(path_dataset,
                                     'master_normalised_subtracted.fits')
        hdu_subtracted = fits.PrimaryHDU(data_subtracted)
        hdu_subtracted.writeto(path_save, clobber=True)

        # save properties to table file
        path_save = os.path.join(path_dataset, 'props_table.csv')
        ascii.write(props_table_save, path_save, format='csv')

        # === update UI ===
        # plot segmentated image
        self.rmmpl()
        figure_photometry = Figure()
        cmap_rand = random_cmap(segm.max + 1, random_state=12345)
        axes = figure_photometry.add_subplot(111)
        axes.imshow(segm, origin='lower', cmap=cmap_rand)
        axes.plot(props_table_save['xcentroid'],
                  props_table_save['ycentroid'],
                  ls='none',
                  color='red',
                  marker='+',
                  ms=10,
                  lw=1.5)
        self.addmpl(figure_photometry)

        # set properties table font and font size
        self.ui.tablePhot.setCurrentFont(QtGui.QFont(TEXT_BROWSER_FONT))
        self.ui.tablePhot.setFontPointSize(TEXT_BROWSER_FONT_SIZE)
        self.ui.tablePhot.setPlainText(repr(props_table_display))

        self.msg('Photometry completed for {}.'.format(path_file_abs))
Example No. 20
# get labels of hot pixels from master bias and master dark
# pixel label, x coord, y coord, master bias, sd, master dark, sd, master object, sd

os.chdir(r'C:\Users\givoltage\Google Drive\DESI\protoDESI\ssl\20160524')

filepath_bias = r'C:\Users\givoltage\Downloads\20160718_-15c\1.0.fit'
filepath_dark = r'C:\Users\givoltage\Google Drive\DESI\protoDESI\ssl\20160524\1sec_dark_009.FIT'

m_bias = fits.open(filepath_bias)[0].data
m_dark = fits.open(filepath_dark)[0].data

segm_bias = detect_sources(m_bias, 1300, npixels=1)
segm_dark = detect_sources(m_dark, 1300, npixels=1)
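# npixels=1 lets a single pixel above the fixed threshold of 1300 counts
# qualify as a "source", which is exactly what isolated hot pixels are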

props_bias = source_properties(m_bias, segm_bias)
props_dark = source_properties(m_dark, segm_dark)

# propsTableColumns = ['id', 'xcentroid', 'ycentroid', 'max_value', 'min_value','source_sum']
table_bias = properties_table(props_bias)
table_dark = properties_table(props_dark)

ascii.write(table_bias, '1.0.csv', format = 'csv')
ascii.write(table_dark, 'protodesi-FPC_00000009(1).csv', format = 'csv')
#log_bias = open('labels_bias.log', 'w')
#log_dark = open('labels_dark.log', 'w')
#
#log_bias.write(log_bias_text)
#log_dark.write(log_dark_text)
#
#log_bias.close()
#log_dark.close()