def test_current_slit(glue_gui):
    """Test the UI currently available for testing."""
    mosviz_gui = glue_gui.viewers[0][0]
    slit_controller = mosviz_gui.slit_controller

    if "slit_width" in mosviz_gui.catalog.meta["special_columns"] and \
            "slit_length" in mosviz_gui.catalog.meta["special_columns"] and \
            mosviz_gui.cutout_wcs is not None:
        assert slit_controller.has_slits
        row = mosviz_gui.current_row
        ra = row[mosviz_gui.catalog.meta["special_columns"]["slit_ra"]]
        dec = row[mosviz_gui.catalog.meta["special_columns"]["slit_dec"]]
        ang_width = row[mosviz_gui.catalog.meta["special_columns"]["slit_width"]]
        ang_length = row[mosviz_gui.catalog.meta["special_columns"]["slit_length"]]

        wcs = mosviz_gui.cutout_wcs

        skycoord = SkyCoord(ra, dec, frame='fk5', unit="deg")
        xp, yp = skycoord.to_pixel(wcs)

        scale = np.sqrt(proj_plane_pixel_area(wcs)) * 3600.

        dx = ang_width / scale
        dy = ang_length / scale

        check_is_close(dx, slit_controller.slits[0].dx)
        check_is_close(dy, slit_controller.slits[0].dy)
        check_is_close(xp, slit_controller.slits[0].x)
        check_is_close(yp, slit_controller.slits[0].y)
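The test above checks simple unit arithmetic: sqrt(proj_plane_pixel_area) is the pixel size in degrees, so multiplying by 3600 gives arcsec per pixel, and dividing the slit's angular size by that scale gives its extent in pixels. A worked sketch with purely illustrative numbers (not taken from the test data):

# Illustrative numbers only: a 0.25"/pixel cutout and a 1" x 3" slit.
scale = 0.25      # arcsec per pixel, i.e. np.sqrt(proj_plane_pixel_area(wcs)) * 3600.
dx = 1.0 / scale  # slit width in pixels  -> 4.0
dy = 3.0 / scale  # slit length in pixels -> 12.0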
Example #2
    def solid_angle(self):
        """Get solid angle of the region.

        Returns
        -------
        angle : `~astropy.units.Quantity`
            Solid angle of the region. In sr.
            Units: ``sr``
        """
        if self.region is None:
            raise ValueError("Region definition required.")

        # compound regions do not implement area()
        # so we use the mask representation and estimate the area
        # from the pixels in the mask using oversampling
        if isinstance(self.region, CompoundSkyRegion):
            # oversample by a factor of ten
            oversampling = 10.0
            wcs = self.to_binsz_wcs(self.binsz_wcs / oversampling).wcs
            pixel_region = self.region.to_pixel(wcs)
            mask = pixel_region.to_mask()
            area = np.count_nonzero(mask) / oversampling**2
        else:
            # all other types of regions should implement area
            area = self.region.to_pixel(self.wcs).area

        solid_angle = area * proj_plane_pixel_area(self.wcs) * u.deg ** 2
        return solid_angle.to("sr")
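The conversion above boils down to multiplying a pixel count by the projected pixel area and converting deg**2 to steradian. A minimal, self-contained sketch with a hand-built TAN WCS (all values are illustrative, not the class's own self.wcs):

import astropy.units as u
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area

# Hand-built 2"/pixel TAN projection, purely for illustration.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.cdelt = [-2 / 3600, 2 / 3600]   # degrees per pixel
wcs.wcs.crval = [83.63, 22.01]

area_pix = 150.0                        # e.g. a region mask covering 150 pixels
solid_angle = area_pix * proj_plane_pixel_area(wcs) * u.deg ** 2
print(solid_angle.to("sr"))             # ~1.4e-8 sr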
Example #4
    def __init__(self, ra_center, dec_center, pixel_size_deg, npix_height, npix_width):

        assert npix_height % 2 == 0, "Number of height pixels must be even"
        assert npix_width % 2 == 0, "Number of width pixels must be even"

        self._npix_height = npix_height
        self._npix_width = npix_width

        assert 0 <= ra_center <= 360.0, "Right Ascension must be between 0 and 360"
        assert -90.0 <= dec_center <= 90.0, "Declination must be between -90.0 and 90.0"

        self._ra_center = float(ra_center)
        self._dec_center = float(dec_center)

        self._pixel_size_deg = float(pixel_size_deg)

        # Build projection, i.e., a World Coordinate System object

        self._wcs = WCS(_get_header(ra_center, dec_center, pixel_size_deg, 'icrs', npix_height, npix_width))

        # Pre-compute all R.A., Decs
        self._ras, self._decs = _get_all_ra_dec(self._wcs, npix_height, npix_width)

        # Make sure we have the right amount of coordinates
        assert self._ras.shape[0] == self._decs.shape[0]
        assert self._ras.shape[0] == npix_width * npix_height

        # Pre-compute pixel area
        self._pixel_area = proj_plane_pixel_area(self._wcs)
Example #5
    def solid_angle(self):
        if self.region is None:
            raise ValueError("Region definition required.")

        area = self.region.to_pixel(self.wcs).area
        solid_angle = area * proj_plane_pixel_area(self.wcs) * u.deg ** 2
        return solid_angle.to("sr")
Example #6
 def pixel_area(self):
     if self.wcs is None:
         return None
     try:
         top_unit = u.Unit(self.wcs.wcs.cunit[0])
         return proj_plane_pixel_area(self.wcs) * (top_unit ** 2) / u.pix
     except (ValueError, AttributeError):
         return None
Example #7
def convert_to_imagecoord(shape, header):
    """Convert the coordlist of `shape` to image coordinates

    Parameters
    ----------
    shape : `pyregion.parser_helper.Shape`
        The `Shape` whose coordinates will be converted

    header : `~astropy.io.fits.Header`
        Specifies what WCS transformations to use.

    Returns
    -------
    new_coordlist : list
        A list of image coordinates defining the shape.

    """
    arg_types = _generate_arg_types(len(shape.coord_list), shape.name)

    new_coordlist = []
    is_even_distance = True
    coord_list_iter = iter(zip(shape.coord_list, arg_types))

    new_wcs = WCS(header)
    pixel_scales = proj_plane_pixel_scales(new_wcs)

    for coordinate, coordinate_type in coord_list_iter:
        if coordinate_type == CoordOdd:
            even_coordinate = next(coord_list_iter)[0]

            old_coordinate = SkyCoord(coordinate, even_coordinate,
                                      frame=shape.coord_format, unit='degree',
                                      obstime='J2000')
            new_coordlist.extend(
                np.asscalar(x)
                for x in old_coordinate.to_pixel(new_wcs, origin=1)
            )

        elif coordinate_type == Distance:
            if arg_types[-1] == Angle:
                degree_per_pixel = pixel_scales[0 if is_even_distance else 1]

                is_even_distance = not is_even_distance
            else:
                degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))

            new_coordlist.append(coordinate / degree_per_pixel)

        elif coordinate_type == Angle:
            new_angle = _estimate_angle(coordinate,
                                        shape.coord_format,
                                        header)
            new_coordlist.append(new_angle)

        else:
            new_coordlist.append(coordinate)

    return new_coordlist
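The Distance branch above divides an angular size in degrees by the per-pixel scale derived from proj_plane_pixel_area. With purely illustrative numbers:

# Illustrative numbers: a 1.5"/pixel image and a 30" radius.
degree_per_pixel = 1.5 / 3600                  # ~ np.sqrt(proj_plane_pixel_area(new_wcs))
radius_pix = (30 / 3600) / degree_per_pixel    # -> 20.0 pixels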
 def pixel_area(self):
     if self.wcs is None:
         return None
     try:
         top_unit = u.Unit(self.wcs.wcs.cunit[0])
         pixel_area = (proj_plane_pixel_area(self.wcs) * (top_unit ** 2) / u.pix).to(u.arcsec ** 2 / u.pix)
         return pixel_area
     except (ValueError, AttributeError):
         return None
    def __init__(self, ra_center, dec_center, pixel_size_deg, npix_height,
                 npix_width):

        assert npix_height % 2 == 0, "Number of height pixels must be even"
        assert npix_width % 2 == 0, "Number of width pixels must be even"

        if isinstance(npix_height, float):

            assert npix_height.is_integer(), "This is a bug"

        if isinstance(npix_width, float):

            assert npix_width.is_integer(), "This is a bug"

        self._npix_height = int(npix_height)
        self._npix_width = int(npix_width)

        assert 0 <= ra_center <= 360.0, "Right Ascension must be between 0 and 360"
        assert -90.0 <= dec_center <= 90.0, "Declination must be between -90.0 and 90.0"

        self._ra_center = float(ra_center)
        self._dec_center = float(dec_center)

        self._pixel_size_deg = float(pixel_size_deg)

        # Build projection, i.e., a World Coordinate System object

        self._wcs = WCS(
            _get_header(ra_center, dec_center, pixel_size_deg, 'icrs',
                        npix_height, npix_width))

        # Pre-compute all R.A., Decs
        self._ras, self._decs = _get_all_ra_dec(self._wcs, npix_height,
                                                npix_width)

        # Make sure we have the right amount of coordinates
        assert self._ras.shape[0] == self._decs.shape[0]
        assert self._ras.shape[0] == npix_width * npix_height

        # Pre-compute pixel area
        self._pixel_area = proj_plane_pixel_area(self._wcs)

        # Pre-compute an oversampled version to be used for PSF integration
        # if oversample and pixel_size_deg > 0.025:
        #
        #     self._oversampled, self._oversample_factor = self._oversample(new_pixel_size=0.025)
        #
        # else:
        #
        #     self._oversampled = self
        #     self._oversample_factor = 1

        # Cache for angular distances from a point (see get_spherical_distances_from)
        self._distance_cache = {}
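_get_header and _get_all_ra_dec are project-internal helpers, but the pixel-area relation the constructor pre-computes can be sanity-checked with an equivalent flat TAN projection. A sketch under that assumption:

import numpy as np
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area

pixel_size_deg = 0.01
w = WCS(naxis=2)                        # illustrative stand-in for _get_header(...)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.cdelt = [-pixel_size_deg, pixel_size_deg]
w.wcs.crval = [10.0, -5.0]

# For such a projection the pre-computed pixel area is simply pixel_size_deg**2.
assert np.isclose(proj_plane_pixel_area(w), pixel_size_deg ** 2)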
 def test_crpix_maps_to_crval(self):
     w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs,
                  mode='partial').wcs
     pscale = np.sqrt(proj_plane_pixel_area(w))
     assert_allclose(
         w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval,
         rtol=0.0, atol=1e-6 * pscale
     )
     assert_allclose(
         w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval,
         rtol=0.0, atol=1e-6 * pscale
     )
Example #12
def _get_trace(trace_name, db, model=None):
    """
    Get the trace array for the specified parameter key. Also handles lookup and
    calculation of the special keys magdiff, centerdist, axisratio, and sbeff.
    e.g. 1_PSF_2_Sersic_centerdist or 2_Sersic_sbeff
    :param trace_name: Name of the model parameter to get the trace for
    :param db: astropy Table object to get trace from
    :return: Trace values as NxD array, where N is the number of samples and D
        is the number of dimensions of the parameter (2 for xy, 1 for others)
    """
    try:
        name_comps = trace_name.split('_')
        if 'magdiff' in name_comps:
            key_mag1 = '_'.join(name_comps[0:2] + ['mag'])
            key_mag2 = '_'.join(name_comps[2:4] + ['mag'])
            trace = db[key_mag1] - db[key_mag2]
        elif 'centerdist' in name_comps:
            key_xy1 = '_'.join(name_comps[0:2] + ['xy'])
            key_xy2 = '_'.join(name_comps[2:4] + ['xy'])
            cdiff = db[key_xy1] - db[key_xy2]
            trace = np.sqrt(np.sum(cdiff**2, axis=1))
        elif 'axisratio' in name_comps:
            key_prefix = '_'.join(name_comps[0:2] + [''])
            trace = db[key_prefix + 'reff_b'] / db[key_prefix + 'reff']
        elif 'sbeff' in name_comps:
            key_prefix = '_'.join(name_comps[0:2] + [''])
            trace = mag_to_flux(db[key_prefix + 'mag'], 0)
            trace = Sersic.sb_eff(trace, db[key_prefix + 'index'],
                                  db[key_prefix + 'reff'],
                                  db[key_prefix + 'reff_b'])
            if model is not None:
                wcs = WCS(model.obs_header)
                px_area = proj_plane_pixel_area(wcs)
                px_area *= 3600**2  # to sq arcsec
                trace /= px_area
            trace = -2.5 * np.log10(trace)
        else:
            trace = db[trace_name]
    except KeyError as err:
        names = db.colnames
        err.message = 'Unable to find trace {} while plotting {}. Available '\
            'traces are {} or magdiff, centerdist, axisratio, sbeff'\
            .format(err, trace_name, names)
        raise err

    # For 1D traces (all except XY coordinates), expand to Nx1 2D array
    if len(trace.shape) == 1:
        trace = np.expand_dims(trace, 1)
    return trace
Example #13
    def solid_angle(self):
        """Get solid angle of the region.

        Returns
        -------
        angle : `~astropy.units.Quantity`
            Solid angle of the region. In sr.
            Units: ``sr``
        """
        if self.region is None:
            raise ValueError("Region definition required.")

        area = self.region.to_pixel(self.wcs).area
        solid_angle = area * proj_plane_pixel_area(self.wcs) * u.deg**2
        return solid_angle.to("sr")
Example #14
def calib_phot(input_value, img, output_units='MJy'):
    '''
    Convert the aperture_sum value to output_units
    input: input_value: value to be converted, with units
           img: image HDU, for ancillary/calibration data
           output_units: units to convert output value to

    output: calibrated value
    '''
    #  do we already have unit information?
    if input_value.unit.is_unity(): # means unit isn't given in table

        # so try to figure out what to do from image header
        hdr = img[0].header
        if 'BUNIT' in hdr:
            # shouldn't get here if coming from photutils, but anyway..
            obs_val = input_value * u.Unit(hdr['BUNIT']) # might fail if BUNIT badly formatted..
        elif 'MAGZP' in hdr and 'VEGAFLUX' in hdr: # convert from mag to Jansky
            print('magnitude to flux')
            mag = hdr['MAGZP'] - 2.5*np.log10(input_value)
            obs_val = hdr['VEGAFLUX']*10.0**(-0.4*mag) * u.Jy
        else:
            print('Not enough info to calibrate')
            return None
    # surface-brightness to flux conversion
    elif input_value.unit == u.MJy/u.sr: #        (this is not perfectly general but oh well)
        print('surface brightness to flux')
        hdr = img[0].header
        wcs = WCS(hdr)
        # proj_plane_pixel_area returns values in same units as CDELT,etc: likely deg^2
        pxarea = (proj_plane_pixel_area(wcs) * (u.degree**2)).to(u.arcsec**2) 
        intermed = input_value.to(u.Jy/u.arcsec**2) # not strictly necessary but easier to follow
        obs_val = intermed * pxarea
    else:
        obs_val = input_value

    #  now do the conversion
    try:
        calib_val = obs_val.to(output_units).value
    except UnitsError:
        print('Problem with unit conversion')
        return(None)

    return(calib_val)
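A hedged usage sketch for the surface-brightness branch, assuming calib_phot above is defined in a module that already imports numpy as np, astropy.units as u, WCS, proj_plane_pixel_area, and UnitsError; the 1.2" pixel scale and the empty data array are illustrative only:

import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.cdelt = [-1.2 / 3600, 1.2 / 3600]   # 1.2" pixels (illustrative)
img = fits.HDUList([fits.PrimaryHDU(np.zeros((10, 10)), header=w.to_header())])

# 5 MJy/sr over one 1.2" x 1.2" pixel is roughly 1.7e-4 Jy.
print(calib_phot(5 * u.MJy / u.sr, img, output_units='Jy'))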
from taskinit import iatool

ia = iatool()

iram_cube = SpectralCube.read(
    "/home/ekoch/bigdata/ekoch/M33/co21/noema/m33.co21_iram.noema_regrid.fits")
beam = iram_cube.beam

ia.open("line_imaging/M33-ARM05_stage1.pb")
noema_pb = ia.getchunk().squeeze()
ia.close()

# Convert to Jy/pixel. K -> Jy/beam not yet working in spectral-cube so do it
# by-hand
beam_per_pix = (proj_plane_pixel_area(iram_cube.wcs) * u.deg**2).to(
    u.sr) / beam.sr
iram_data = ((iram_cube.filled_data[:] /
              iram_cube.beam.jtok(230.538 * u.GHz))) * beam_per_pix

iram_data[np.where(noema_pb.T == 0.0)] = 0.0

tmp = ia.newimagefromimage(
    infile="line_imaging/M33-ARM05_stage1.model",
    outfile="line_imaging/sdmodel_tests/M33-ARM05.iram_model.image",
    overwrite=True)

# Set the beam
tmp.setrestoringbeam(major="{0}{1}".format(beam.major.value, beam.major.unit),
                     minor="{0}{1}".format(beam.minor.value, beam.minor.unit),
                     pa="{0}{1}".format(beam.pa.value, beam.pa.unit))
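The by-hand K -> Jy/pixel step above scales by the ratio of pixel to beam solid angles. A quick numeric check with illustrative numbers (not the actual IRAM/NOEMA beam):

import numpy as np

# A 12" circular Gaussian beam and 3" pixels, for illustration.
omega_beam = np.pi / (4 * np.log(2)) * 12.0 * 12.0   # beam solid angle, ~163 arcsec^2
beam_per_pix = 3.0 ** 2 / omega_beam                 # ~0.055 beams per pixel
print(beam_per_pix)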
Example #16
def wcs_project(ccd, target_wcs, target_shape=None, order='bilinear'):
    """
    Given a CCDData image with WCS, project it onto a target WCS and
    return the reprojected data as a new CCDData image.

    Any flags, weight, or uncertainty are ignored in doing the
    reprojection.

    Parameters
    ----------
    ccd : `~ccdproc.CCDData`
        Data to be projected.

    target_wcs : `~astropy.wcs.WCS` object
        WCS onto which all images should be projected.

    target_shape : two element list-like or None, optional
        Shape of the output image. If omitted, defaults to the shape of the
        input image.
        Default is ``None``.

    order : str, optional
        Interpolation order for re-projection. Must be one of:

        + 'nearest-neighbor'
        + 'bilinear'
        + 'biquadratic'
        + 'bicubic'

        Default is ``'bilinear'``.

    {log}

    Returns
    -------
    ccd : `~ccdproc.CCDData`
        A transformed CCDData object.
    """
    from reproject import reproject_interp

    if not (ccd.wcs.is_celestial and target_wcs.is_celestial):
        raise ValueError('one or both WCS is not celestial.')

    if target_shape is None:
        target_shape = ccd.shape

    projected_image_raw, _ = reproject_interp((ccd.data, ccd.wcs),
                                              target_wcs,
                                              shape_out=target_shape,
                                              order=order)

    reprojected_mask = None
    if ccd.mask is not None:
        reprojected_mask, _ = reproject_interp((ccd.mask, ccd.wcs),
                                               target_wcs,
                                               shape_out=target_shape,
                                               order=order)
        # Make the mask 1 if the reprojected mask pixel value is non-zero.
        # A small threshold is included to allow for some rounding in
        # reproject_interp.
        reprojected_mask = reprojected_mask > 1e-8

    # The reprojection will contain nan for any pixels for which the source
    # was outside the original image. Those should be masked also.
    output_mask = np.isnan(projected_image_raw)

    if reprojected_mask is not None:
        output_mask = output_mask | reprojected_mask

    # Need to scale counts by ratio of pixel areas
    area_ratio = (proj_plane_pixel_area(target_wcs) /
                  proj_plane_pixel_area(ccd.wcs))

    # If nothing ended up masked, don't create a mask.
    if not output_mask.any():
        output_mask = None

    nccd = CCDData(area_ratio * projected_image_raw, wcs=target_wcs,
                   mask=output_mask,
                   header=ccd.header, unit=ccd.unit)

    return nccd
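A minimal usage sketch, assuming wcs_project above is importable (it matches ccdproc's function of the same name) and that reproject is installed; make_tan_wcs and the uniform 100x100 image are illustrative only:

import numpy as np
from astropy.nddata import CCDData
from astropy.wcs import WCS

def make_tan_wcs(cdelt_deg):
    # Illustrative helper: a TAN projection centred on an arbitrary point.
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    w.wcs.cdelt = [-cdelt_deg, cdelt_deg]
    w.wcs.crval = [150.0, 2.2]
    w.wcs.crpix = [50.5, 50.5]
    return w

ccd = CCDData(np.ones((100, 100)), unit="adu", wcs=make_tan_wcs(1 / 3600))
target_wcs = make_tan_wcs(2 / 3600)      # reproject onto 2x coarser pixels

nccd = wcs_project(ccd, target_wcs)
# Counts are rescaled by the pixel-area ratio, so the central value is ~4.
print(nccd.data[50, 50])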
Example #17
    def render_data(self,
                    row,
                    spec1d_data=None,
                    spec2d_data=None,
                    image_data=None,
                    level2_data=None):
        """
        Render the updated data sets in the individual plot widgets within the
        MOSViz viewer.
        """
        self._check_unsaved_comments()

        if spec1d_data is not None:

            spectrum1d_x = spec1d_data[spec1d_data.id['Wavelength']]
            spectrum1d_y = spec1d_data[spec1d_data.id['Flux']]
            spectrum1d_yerr = spec1d_data[spec1d_data.id['Uncertainty']]

            self.spectrum1d_widget.set_data(x=spectrum1d_x,
                                            y=spectrum1d_y,
                                            yerr=spectrum1d_yerr)

            # Try to retrieve the wcs information
            try:
                flux_unit = spec1d_data.header.get('BUNIT', 'Jy').lower()
                flux_unit = flux_unit.replace('counts', 'count')
                flux_unit = u.Unit(flux_unit)
            except ValueError:
                flux_unit = u.Unit("Jy")

            try:
                disp_unit = spec1d_data.header.get('CUNIT1',
                                                   'Angstrom').lower()
                disp_unit = u.Unit(disp_unit)
            except ValueError:
                disp_unit = u.Unit("Angstrom")

            self.spectrum1d_widget.axes.set_xlabel(
                "Wavelength [{}]".format(disp_unit))
            self.spectrum1d_widget.axes.set_ylabel(
                "Flux [{}]".format(flux_unit))
        else:
            self.spectrum1d_widget.no_data()

        if image_data is not None:
            if not self.image_widget.isVisible():
                self.image_widget.setVisible(True)
            wcs = image_data.coords.wcs
            self.cutout_wcs = wcs

            array = image_data.get_component(image_data.id['Flux']).data

            # Add the slit patch to the plot
            self.slit_controller.clear_slits()
            if "slit_width" in self.catalog.meta["special_columns"] and \
                    "slit_length" in self.catalog.meta["special_columns"] and \
                    wcs is not None:
                self.add_slit(row)
                self.image_widget.draw_slit()
            else:
                self.image_widget.reset_limits()

            self.image_widget.set_image(array,
                                        wcs=wcs,
                                        interpolation='none',
                                        origin='lower')

            self.image_widget.axes.set_xlabel("Spatial X")
            self.image_widget.axes.set_ylabel("Spatial Y")
            if self.slit_controller.has_slits:
                self.image_widget.set_slit_limits()

            self.image_widget._redraw()
        else:
            self.cutout_wcs = None
            self.image_widget.setVisible(False)

        # Plot the 2D spectrum data last because by then we can make sure that
        # we set up the extent of the image appropriately if the cutout and the
        # 1D spectrum are present so that the axes can be locked.

        # We are repurposing the spectrum 2d widget to handle the display of both
        # the level 3 and level 2 spectra.
        if spec2d_data is not None or level2_data is not None:

            # These are probably retrievable from the slit controller.
            scale = np.sqrt(proj_plane_pixel_area(wcs)) * 3600.
            slit_length = row[self.catalog.meta["special_columns"]
                              ["slit_length"]]
            dy = slit_length / scale
            ra = row[self.catalog.meta["special_columns"]
                     ["slit_ra"]] * u.degree
            dec = row[self.catalog.meta["special_columns"]
                      ["slit_dec"]] * u.degree
            skycoord = SkyCoord(ra, dec, frame='fk5')
            xp, yp = skycoord.to_pixel(wcs)

            self._load_spectrum2d_widget(dy, yp, image_data, spec2d_data,
                                         level2_data)
        else:
            self.spectrum2d_widget.no_data()

        # Clear the meta information widget
        # NOTE: this process is inefficient
        for i in range(self.meta_form_layout.count()):
            wid = self.meta_form_layout.itemAt(i).widget()
            label = self.meta_form_layout.labelForField(wid)

            if label is not None:
                label.deleteLater()

            wid.deleteLater()

        # Repopulate the form layout
        # NOTE: this process is inefficient
        for col in row.colnames:
            if col.lower() not in ["comments", "flag"]:
                line_edit = QLineEdit(str(row[col]),
                                      self.central_widget.meta_form_widget)
                line_edit.setReadOnly(True)

                self.meta_form_layout.addRow(col, line_edit)

        # Set up comment and flag input/display boxes
        if self.comments:
            if self.savepath is not None:
                if self.savepath == -1:
                    line_edit = QLineEdit(
                        os.path.basename("Not Saving to File."),
                        self.central_widget.meta_form_widget)
                    line_edit.setReadOnly(True)
                    self.meta_form_layout.addRow("Save File", line_edit)
                else:
                    line_edit = QLineEdit(os.path.basename(self.savepath),
                                          self.central_widget.meta_form_widget)
                    line_edit.setReadOnly(True)
                    self.meta_form_layout.addRow("Save File", line_edit)

            self.input_flag = QLineEdit(self.get_flag(),
                                        self.central_widget.meta_form_widget)
            self.input_flag.textChanged.connect(self._text_changed)
            self.input_flag.setStyleSheet(
                "background-color: rgba(255, 255, 255);")
            self.meta_form_layout.addRow("Flag", self.input_flag)

            self.input_comments = QPlainTextEdit(
                self.get_comment(), self.central_widget.meta_form_widget)
            self.input_comments.textChanged.connect(self._text_changed)
            self.input_comments.setStyleSheet(
                "background-color: rgba(255, 255, 255);")
            self.meta_form_layout.addRow("Comments", self.input_comments)

            self.input_save = QPushButton('Save',
                                          self.central_widget.meta_form_widget)
            self.input_save.clicked.connect(self.update_comments)
            self.input_save.setDefault(True)

            self.input_refresh = QPushButton(
                'Reload', self.central_widget.meta_form_widget)
            self.input_refresh.clicked.connect(self.refresh_comments)

            self.meta_form_layout.addRow(self.input_save, self.input_refresh)
size = 2 * u.arcsec

cutout_3mm = Cutout2D(cont3mm[0].data.squeeze(),
                      cutout_center,
                      size,
                      wcs=wcs.WCS(cont3mm[0].header).celestial)
cutout_7mm = Cutout2D(cont7mm[0].data.squeeze(),
                      cutout_center,
                      size,
                      wcs=wcs.WCS(cont7mm[0].header).celestial)
proj_7mmto3mm, _ = reproject.reproject_interp(
    (cutout_7mm.data, cutout_7mm.wcs),
    cutout_3mm.wcs,
    shape_out=cutout_3mm.shape)

pixscale = wcsutils.proj_plane_pixel_area(cutout_3mm.wcs)**0.5 * u.deg

errest = stats.mad_std(cutout_3mm.data)

chi2shift = image_registration.chi2_shift(proj_7mmto3mm,
                                          cutout_3mm.data,
                                          err=errest,
                                          upsample_factor=1000)
print(chi2shift)
print(chi2shift[:2] * cutout_3mm.wcs.wcs.cdelt * 3600)
print(chi2shift[:2] * cutout_3mm.wcs.wcs.cdelt)
print((((chi2shift[:2] * cutout_3mm.wcs.wcs.cdelt * 3600)**2).sum())**0.5)
"""
[-4.295500000000004, 3.9625000000000057, 0.0034999999999999892, 0.003500000000000003]
[0.0214775 0.0198125]
[5.96597222e-06 5.50347222e-06]
Example #19
def _reproject_celestial(array,
                         wcs_in,
                         wcs_out,
                         shape_out,
                         parallel=True,
                         return_footprint=True):

    # Check the parallel flag.
    if type(parallel) != bool and type(parallel) != int:
        raise TypeError(
            "The 'parallel' flag must be a boolean or integral value")

    if type(parallel) == int:
        # parallel is a number of processes.
        if parallel <= 0:
            raise ValueError(
                "The number of processors to use must be strictly positive")
        nproc = parallel
    else:
        # parallel is a boolean flag. nproc = None here means automatically selected
        # number of processes.
        nproc = None if parallel else 1

    # There are currently precision issues below certain resolutions, so we
    # emit a warning if this is the case. For more details, see:
    # https://github.com/astropy/reproject/issues/199
    area_threshold = (0.05 / 3600)**2
    if ((isinstance(wcs_in, WCS)
         and proj_plane_pixel_area(wcs_in) < area_threshold)
            or (isinstance(wcs_out, WCS)
                and proj_plane_pixel_area(wcs_out) < area_threshold)):
        warnings.warn(
            "The reproject_exact function currently has precision "
            "issues with images that have resolutions below ~0.05 "
            "arcsec, so the results may not be accurate.", UserWarning)

    # Convert input array to float values. If this comes from a FITS, it might have
    # float32 as value type and that can break things in Cython
    array = np.asarray(array, dtype=float)

    # TODO: make this work for n-dimensional arrays
    if wcs_in.pixel_n_dim != 2:
        raise NotImplementedError(
            "Only 2-dimensional arrays can be reprojected at this time")

    # TODO: at the moment, we compute the coordinates of all of the corners,
    # but we might want to do it in steps for large images.

    # Start off by finding the world position of all the corners of the input
    # image in world coordinates

    ny_in, nx_in = array.shape

    x = np.arange(nx_in + 1.) - 0.5
    y = np.arange(ny_in + 1.) - 0.5

    xp_in, yp_in = np.meshgrid(x, y, indexing='xy', sparse=False, copy=False)

    world_in = wcs_in.pixel_to_world(xp_in, yp_in)

    # Now compute the world positions of all the corners in the output header

    ny_out, nx_out = shape_out

    x = np.arange(nx_out + 1.) - 0.5
    y = np.arange(ny_out + 1.) - 0.5

    xp_out, yp_out = np.meshgrid(x, y, indexing='xy', sparse=False, copy=False)

    world_out = wcs_out.pixel_to_world(xp_out, yp_out)

    # Convert the input world coordinates to the frame of the output world
    # coordinates.

    world_in = world_in.transform_to(world_out.frame)

    # Finally, compute the pixel positions in the *output* image of the pixels
    # from the *input* image.

    xp_inout, yp_inout = wcs_out.world_to_pixel(world_in)

    world_in_unitsph = world_in.represent_as('unitspherical')
    xw_in, yw_in = world_in_unitsph.lon.to_value(
        u.deg), world_in_unitsph.lat.to_value(u.deg)

    world_out_unitsph = world_out.represent_as('unitspherical')
    xw_out, yw_out = world_out_unitsph.lon.to_value(
        u.deg), world_out_unitsph.lat.to_value(u.deg)

    # Put together the parameters common both to the serial and parallel implementations. The aca
    # function is needed to enforce that the array will be contiguous when passed to the low-level
    # raw C function, otherwise Cython might complain.

    aca = np.ascontiguousarray
    common_func_par = [
        0, ny_in, nx_out, ny_out,
        aca(xp_inout),
        aca(yp_inout),
        aca(xw_in),
        aca(yw_in),
        aca(xw_out),
        aca(yw_out),
        aca(array), shape_out
    ]

    if nproc == 1:

        array_new, weights = _reproject_slice([0, nx_in] + common_func_par)

        with np.errstate(invalid='ignore'):
            array_new /= weights

        if return_footprint:
            return array_new, weights
        return array_new

    elif (nproc is None or nproc > 1):

        from multiprocessing import Pool, cpu_count

        # If needed, establish the number of processors to use.
        if nproc is None:
            nproc = cpu_count()

        # Prime each process in the pool with a small function that disables
        # the ctrl+c signal in the child process.
        pool = Pool(nproc, _init_worker)

        inputs = []
        for i in range(nproc):
            start = int(nx_in) // nproc * i
            end = int(nx_in) if i == nproc - 1 else int(nx_in) // nproc * (i + 1)
            inputs.append([start, end] + common_func_par)

        results = pool.map(_reproject_slice, inputs)

        pool.close()

        array_new, weights = zip(*results)

        array_new = sum(array_new)
        weights = sum(weights)

        with np.errstate(invalid='ignore'):
            array_new /= weights

        if return_footprint:
            return array_new, weights
        else:
            return array_new
Example #20
    # calculate offsets against original pointing before
    # "apply_pointing_corrections" is applied
    if 'CRVAL1A' in fh.header:
        fh.header['CRVAL1'] = fh.header['CRVAL1A']
        fh.header['CRVAL2'] = fh.header['CRVAL2A']

    if regname == 'G34':
        # fix bad stripe by removing it
        fh.data[:, 2052:2060] = np.nan
        fh.data[2125:2140, 2045:2060] = np.nan

    offset[regname] = {}

    ww = WCS(fh.header)
    center = ww.wcs_pix2world(fh.data.shape[1] / 2, fh.data.shape[0] / 2, 0)
    mgps_pixscale = wcsutils.proj_plane_pixel_area(ww)**0.5 * u.deg

    if regname in center_coordinate:
        coordinate = center_coordinate[regname]
    else:
        coordinate = coordinates.SkyCoord(
            center[0],
            center[1],
            unit=(u.deg, u.deg),
            frame=wcsutils.wcs_to_celestial_frame(ww))
    radius = np.max(
        fh.data.shape) * wcsutils.proj_plane_pixel_area(ww)**0.5 * u.deg
    if radius > 1.25 * u.deg:
        radius = 1.25 * u.deg

    print(f"region {regname} file {fn}")
#    bkg_sum = bkg_mean * apertures.area()
#    final_sum = phot_table['aperture_sum_raw'] - bkg_sum
#    phot_table['residual_aperture_sum'] = final_sum
#    
#    # Compute magnitudes and magnitude differences as a double check
#    # (these should be centered around zero)
#    instMag   = -2.5*np.log10(phot_table['residual_aperture_sum'])
#    deltaMags = np.array(instMag - starCatalog1[thisMag])
#    deltaMag, med, std = sigma_clipped_stats(deltaMags, sigma=3.0, iters=5)
#
#    # Plot the deltaMag distribution to check if it is centered about zero
#    num, bins, patches = plt.hist(deltaMags)
#    plt.plot([deltaMag, deltaMag], [0, np.max(num)])
#    plt.show()
    
    # Perform the final conversion to Jy/arcsec^2
    wcs = WCS(stokesI.header)
    pixel_area = proj_plane_pixel_area(wcs)*(3600**2)
    BUNIT = 'uJy/sqarcs'
    BSCALE = zeroFlux[thisWaveband]*(1e6)*(multFact1/pixel_area)
    BZERO  = 0.0
    
    stokesI.header.set('BUNIT', value = BUNIT,
               comment='Physical units for the image')
    stokesI.header.set('BSCALE', value = BSCALE, after='BUNIT',
               comment='Conversion factor for physical units', )
    stokesI.header.set('BZERO', value = BZERO, after='BSCALE',
               comment='Zero level for the physical units.')
    
    # Finally write the updated header to disk
    stokesI.write()
Example #22
col_pal = sb.color_palette('colorblind')

cosinc = np.cos(gal.inclination.to(u.rad)).value

moment0 = fits.open(fourteenB_wGBT_HI_file_dict["Moment0"])[0]
moment0_wcs = WCS(moment0.header)

mom0_proj = Projection.from_hdu(moment0)

beam = Beam.from_fits_header(moment0.header)

# Convert to K km s and correct for disk inclination.
moment0_Kkm_s = beam.jtok(hi_freq).value * (moment0.data / 1000.) * cosinc
moment0_coldens = moment0_Kkm_s * hi_coldens_Kkms.value

pixscale = np.sqrt(proj_plane_pixel_area(moment0_wcs))

# Use the reprojected version
co_moment0 = fits.open(
    iram_co21_14B088_data_path("m33.co21_iram.14B-088_HI.mom0.fits"))[0]

co_noise_map = fits.open(
    iram_co21_14B088_data_path("m33.rms.14B-088_HI.fits"))[0]

onecolumn_Npanel_figure(N=1.5)

img_slice = (slice(902, 1084), slice(400, 650))
offset = [902, 400]

fig = plt.figure(figsize=(4.4, 5.84))
Example #23
    #    phot_table['residual_aperture_sum'] = final_sum
    #
    #    # Compute magnitudes and magnitude differences as a double check
    #    # (these should be centered around zero)
    #    instMag   = -2.5*np.log10(phot_table['residual_aperture_sum'])
    #    deltaMags = np.array(instMag - starCatalog1[thisMag])
    #    deltaMag, med, std = sigma_clipped_stats(deltaMags, sigma=3.0, iters=5)
    #
    #    # Plot the deltaMag distribution to check if it is centered about zero
    #    num, bins, patches = plt.hist(deltaMags)
    #    plt.plot([deltaMag, deltaMag], [0, np.max(num)])
    #    plt.show()

    # Perform the final conversion to Jy/arcsec^2
    wcs = WCS(stokesI.header)
    pixel_area = proj_plane_pixel_area(wcs) * (3600**2)
    BUNIT = 'uJy/sqarcs'
    BSCALE = zeroFlux[thisWaveband] * (1e6) * (multFact1 / pixel_area)
    BZERO = 0.0

    stokesI.header.set('BUNIT',
                       value=BUNIT,
                       comment='Physical units for the image')
    stokesI.header.set(
        'BSCALE',
        value=BSCALE,
        after='BUNIT',
        comment='Conversion factor for physical units',
    )
    stokesI.header.set('BZERO',
                       value=BZERO,
                       after='BSCALE',
                       comment='Zero level for the physical units.')

    # Finally write the updated header to disk
    stokesI.write()
gf10 = modeling.models.Gaussian2D(amplitude=cs10pk.max(),
                                  x_mean=cs10peak[2],
                                  y_mean=cs10peak[1])
yinds, xinds = np.indices(cs10pk.shape)
fitter10 = modeling.fitting.LevMarLSQFitter()
gf10fit = fitter10(model=gf10,
                   x=xinds,
                   y=yinds,
                   z=cs10pk,
                   weights=1 / stats.mad_std(cs10pk.value))

# correct cs10 wcs
ww_cs10_corr = cs10pk.wcs
ww_cs10_corr.wcs.radesys = 'ICRS'
center10 = ww_cs10_corr.wcs_pix2world(gf10fit.x_mean, gf10fit.y_mean, 0)
pixscale10 = wcsutils.proj_plane_pixel_area(cs10pk.wcs)**0.5 * u.deg
centererr10 = np.array([
    fitter10.fit_info['param_cov'][(1, 1)],
    fitter10.fit_info['param_cov'][(2, 2)]
])**0.5 * pixscale10

cs21pk = cs21cube.max(axis=0)
gf21 = modeling.models.Gaussian2D(amplitude=cs21pk.max(),
                                  x_mean=cs21peak[2],
                                  y_mean=cs21peak[1])
yinds, xinds = np.indices(cs21pk.shape)
fitter21 = modeling.fitting.LevMarLSQFitter()
fitter21(model=gf21, x=xinds, y=yinds, z=cs21pk)
gf21fit = fitter21(model=gf21,
                   x=xinds,
                   y=yinds,
Example #25
    def render_data(self,
                    row,
                    spec1d_data=None,
                    spec2d_data=None,
                    image_data=None):
        """
        Render the updated data sets in the individual plot widgets within the
        MOSViz viewer.
        """
        self._check_unsaved_comments()

        if spec1d_data is not None:

            spectrum1d_x = spec1d_data[spec1d_data.id['Wavelength']]
            spectrum1d_y = spec1d_data[spec1d_data.id['Flux']]
            spectrum1d_yerr = spec1d_data[spec1d_data.id['Uncertainty']]

            self.spectrum1d_widget.set_data(x=spectrum1d_x,
                                            y=spectrum1d_y,
                                            yerr=spectrum1d_yerr)

            # Try to retrieve the wcs information
            try:
                flux_unit = spec1d_data.header.get('BUNIT', 'Jy').lower()
                flux_unit = flux_unit.replace('counts', 'count')
                flux_unit = u.Unit(flux_unit)
            except ValueError:
                flux_unit = u.Unit("Jy")

            try:
                disp_unit = spec1d_data.header.get('CUNIT1',
                                                   'Angstrom').lower()
                disp_unit = u.Unit(disp_unit)
            except ValueError:
                disp_unit = u.Unit("Angstrom")

            self.spectrum1d_widget.axes.set_xlabel(
                "Wavelength [{}]".format(disp_unit))
            self.spectrum1d_widget.axes.set_ylabel(
                "Flux [{}]".format(flux_unit))

        if image_data is not None:
            wcs = image_data.coords.wcs

            self.image_widget.set_image(image_data.get_component(
                image_data.id['Flux']).data,
                                        wcs=wcs,
                                        interpolation='none',
                                        origin='lower')

            self.image_widget.axes.set_xlabel("Spatial X")
            self.image_widget.axes.set_ylabel("Spatial Y")

            # Add the slit patch to the plot

            ra = row[self.catalog.meta["special_columns"]
                     ["slit_ra"]] * u.degree
            dec = row[self.catalog.meta["special_columns"]
                      ["slit_dec"]] * u.degree
            slit_width = row[self.catalog.meta["special_columns"]
                             ["slit_width"]]
            slit_length = row[self.catalog.meta["special_columns"]
                              ["slit_length"]]

            skycoord = SkyCoord(ra, dec, frame='fk5')
            xp, yp = skycoord.to_pixel(wcs)

            scale = np.sqrt(proj_plane_pixel_area(wcs)) * 3600.

            dx = slit_width / scale
            dy = slit_length / scale

            self.image_widget.draw_rectangle(x=xp, y=yp, width=dx, height=dy)

            self.image_widget._redraw()
        else:
            self.image_widget.setVisible(False)

        # Plot the 2D spectrum data last because by then we can make sure that
        # we set up the extent of the image appropriately if the cutout and the
        # 1D spectrum are present so that the axes can be locked.

        if spec2d_data is not None:
            wcs = spec2d_data.coords.wcs

            xp2d = np.arange(spec2d_data.shape[1])
            yp2d = np.repeat(0, spec2d_data.shape[1])
            spectrum2d_disp, spectrum2d_offset = spec2d_data.coords.pixel2world(
                xp2d, yp2d)
            x_min = spectrum2d_disp.min()
            x_max = spectrum2d_disp.max()

            if image_data is None:
                y_min = -0.5
                y_max = spec2d_data.shape[0] - 0.5
            else:
                y_min = yp - dy / 2.
                y_max = yp + dy / 2.

            extent = [x_min, x_max, y_min, y_max]

            self.spectrum2d_widget.set_image(image=spec2d_data.get_component(
                spec2d_data.id['Flux']).data,
                                             interpolation='none',
                                             aspect='auto',
                                             extent=extent,
                                             origin='lower')

            self.spectrum2d_widget.axes.set_xlabel("Wavelength")
            self.spectrum2d_widget.axes.set_ylabel("Spatial Y")

            self.spectrum2d_widget._redraw()

        # Clear the meta information widget
        # NOTE: this process is inefficient
        for i in range(self.meta_form_layout.count()):
            wid = self.meta_form_layout.itemAt(i).widget()
            label = self.meta_form_layout.labelForField(wid)

            if label is not None:
                label.deleteLater()

            wid.deleteLater()

        # Repopulate the form layout
        # NOTE: this process is inefficient
        for col in row.colnames:
            if col.lower() not in ["comments", "flag"]:
                line_edit = QLineEdit(str(row[col]),
                                      self.central_widget.meta_form_widget)
                line_edit.setReadOnly(True)

                self.meta_form_layout.addRow(col, line_edit)

        # Set up comment and flag input/display boxes
        if self.comments:
            if self.savepath is not None:
                if self.savepath == -1:
                    line_edit = QLineEdit(
                        os.path.basename("Not Saving to File."),
                        self.central_widget.meta_form_widget)
                    line_edit.setReadOnly(True)
                    self.meta_form_layout.addRow("Save File", line_edit)
                else:
                    line_edit = QLineEdit(os.path.basename(self.savepath),
                                          self.central_widget.meta_form_widget)
                    line_edit.setReadOnly(True)
                    self.meta_form_layout.addRow("Save File", line_edit)

            self.input_flag = QLineEdit(self.get_flag(),
                                        self.central_widget.meta_form_widget)
            self.input_flag.textChanged.connect(self._text_changed)
            self.input_flag.setStyleSheet(
                "background-color: rgba(255, 255, 255);")
            self.meta_form_layout.addRow("Flag", self.input_flag)

            self.input_comments = QPlainTextEdit(
                self.get_comment(), self.central_widget.meta_form_widget)
            self.input_comments.textChanged.connect(self._text_changed)
            self.input_comments.setStyleSheet(
                "background-color: rgba(255, 255, 255);")
            self.meta_form_layout.addRow("Comments", self.input_comments)

            self.input_save = QPushButton('Save',
                                          self.central_widget.meta_form_widget)
            self.input_save.clicked.connect(self.update_comments)
            self.input_save.setDefault(True)

            self.input_refresh = QPushButton(
                'Reload', self.central_widget.meta_form_widget)
            self.input_refresh.clicked.connect(self.refresh_comments)

            self.meta_form_layout.addRow(self.input_save, self.input_refresh)
def calc_radial_profile(fitsfile, center, rstart, rend, rstep, verbose=False, detmaskfile=None, plot=True):
    """

    Utility function to calculate the radial profile from an image `fitsfile` at a `center`

    """
    #
    if (not os.path.isfile(fitsfile)):
        print(f"ERROR. FITS file {fitsfile} not found. Cannot continue.")
        return None
    #
    qhdu = fits.open(fitsfile)
    wcs = WCS(qhdu[0].header)
    #
    # if detmaskfile is provided then will use it for detector mask
    #
    doMask = False
    if detmaskfile is not None:
        if (not os.path.isfile(detmaskfile)):
            print(f"Warning. Detector mask file {detmaskfile} not found. Will not use detector mask!")
            doMask = False
        else:
            det = fits.open(detmaskfile)
            detmask = det['MASK']
            # need the WCS
            wcs_det = WCS(detmask.header)
            doMask = True
    #
    if (not isinstance(center, SkyCoord)):
        print(f"ERROR: the input radial profile centre is not SkyCoord object. Cannot continue.")
        return None
    #
    j = 0
    rx = rstart
    counts = []
    counts_err = []
    rmid = []
    #
    empty = False
    while rx < rend:
        r0 = rstart + rstep * j
        rx = rstart + rstep * (j + 1)
        # the mid point; the area-weighted mid point would be better
        rmid.append((r0.value + rx.value) / 2.0)
        if (j == 0):
            xap = SkyCircularAperture(center, rx)
            photo = aperture_photometry(qhdu[0].data, xap, wcs=wcs)
            if (doMask):
                masked = aperture_photometry(detmask.data, xap, wcs=wcs_det)
        else:
            xap = SkyCircularAnnulus(center, r0, rx)
            photo = aperture_photometry(qhdu[0].data, xap, wcs=wcs)
            if (doMask):
                masked = aperture_photometry(detmask.data, xap, wcs=wcs_det)
        #
        ap_area = xap.to_pixel(wcs).area
        good_area = ap_area
        if (doMask):
            good_area = masked['aperture_sum'][0]
        # compare the two annuli areas: with and without bad pixels
        if (verbose):
            print(
                f"Annulus: {r0:.2f},{rx:.2f},geometric area: {ap_area:.1f} pixels,non-masked area {good_area:.1f} pixels, ratio: {ap_area / good_area:.2f}")
        # taking into account the masked pixels
        if (good_area == 0.0):
            counts.append(float('nan'))
            counts_err.append(float('nan'))
        else:
            counts.append(photo['aperture_sum'][0] / good_area)
            counts_err.append(np.sqrt(photo['aperture_sum'][0]) / good_area)

        j += 1
    #
    # convert the results in numpy arrays
    #
    rmid = np.array(rmid)
    counts = np.array(counts)
    counts_err = np.array(counts_err)
    #
    # convert per pixel to per arcsec^2
    pix_area = utils.proj_plane_pixel_area(wcs) * 3600.0 * 3600.0  # in arcsec^2
    counts = counts / pix_area
    counts_err = counts_err / pix_area
    #
    if (plot):
        fig, ax = plt.subplots(figsize=(10, 8))
        ax.errorbar(rmid, counts, xerr=rstep.value / 2.0, yerr=counts_err)
        ax.set_xscale('linear')
        ax.set_yscale('log')
        ax.set_xlabel('Radial distance (arcsec)')
        ax.set_ylabel(r'Counts/arcsec$^2$')
        ax.grid()
        ax.set_title("Radial profile")
    qhdu.close()
    if (doMask):
        det.close()
    return rmid, counts, counts_err
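A hedged usage sketch for calc_radial_profile; the file name and coordinates are placeholders, and astropy/photutils/matplotlib must be importable where the function is defined:

import astropy.units as u
from astropy.coordinates import SkyCoord

center = SkyCoord("10:00:00 +02:00:00", unit=(u.hourangle, u.deg))
result = calc_radial_profile("image.fits", center,
                             rstart=0 * u.arcsec, rend=150 * u.arcsec,
                             rstep=5 * u.arcsec, plot=False)
if result is not None:           # None is returned if the FITS file is missing
    rmid, counts, counts_err = result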
Example #27
def photometry(fileid):
    """
	Run photometry.

	.. codeauthor:: Rasmus Handberg <*****@*****.**>
	"""

    # Settings:
    ref_mag_limit = 17  # Lower limit on reference target brightness
    ref_target_dist_limit = 30  # Reference star must be further than this away to be included

    logger = logging.getLogger(__name__)
    tic = default_timer()

    # Use local copy of archive if configured to do so:
    config = load_config()

    # Get datafile dict from API:
    datafile = api.get_datafile(fileid)
    logger.debug("Datafile: %s", datafile)
    targetid = datafile['targetid']
    photfilter = datafile['photfilter']

    archive_local = config.get('photometry', 'archive_local', fallback=None)
    if archive_local is not None:
        datafile['archive_path'] = archive_local
    if not os.path.isdir(datafile['archive_path']):
        raise FileNotFoundError("ARCHIVE is not available")

    # Get the catalog containing the target and reference stars:
    # TODO: Include proper-motion to the time of observation
    catalog = api.get_catalog(targetid, output='table')
    target = catalog['target'][0]

    # Extract information about target:
    target_name = str(target['target_name'])
    target_coord = coords.SkyCoord(ra=target['ra'],
                                   dec=target['decl'],
                                   unit='deg',
                                   frame='icrs')

    # Folder to save output:
    # TODO: Change this!
    output_folder_root = config.get('photometry', 'output', fallback='.')
    output_folder = os.path.join(output_folder_root, target_name,
                                 '%04d' % fileid)
    os.makedirs(output_folder, exist_ok=True)

    # Also write any logging output to the output folder:
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    _filehandler = logging.FileHandler(os.path.join(output_folder,
                                                    'photometry.log'),
                                       mode='w')
    _filehandler.setFormatter(formatter)
    _filehandler.setLevel(logging.INFO)
    logger.addHandler(_filehandler)

    # The paths to the science image:
    filepath = os.path.join(datafile['archive_path'], datafile['path'])

    # TODO: Download datafile using API to local drive:
    # TODO: Is this a security concern?
    #if archive_local:
    #	api.download_datafile(datafile, archive_local)

    # Translate photometric filter into table column:
    if photfilter == 'gp':
        ref_filter = 'g_mag'
    elif photfilter == 'rp':
        ref_filter = 'r_mag'
    elif photfilter == 'ip':
        ref_filter = 'i_mag'
    elif photfilter == 'zp':
        ref_filter = 'z_mag'
    elif photfilter == 'B':
        ref_filter = 'B_mag'
    elif photfilter == 'V':
        ref_filter = 'V_mag'
    else:
        logger.warning(
            "Could not find filter '%s' in catalogs. Using default gp filter.",
            photfilter)
        ref_filter = 'g_mag'

    references = catalog['references']
    references.sort(ref_filter)

    # Load the image from the FITS file:
    image = load_image(filepath)

    # Calculate pixel-coordinates of references:
    row_col_coords = image.wcs.all_world2pix(
        np.array([[ref['ra'], ref['decl']] for ref in references]), 0)
    references['pixel_column'] = row_col_coords[:, 0]
    references['pixel_row'] = row_col_coords[:, 1]

    # Calculate the targets position in the image:
    target_pixel_pos = image.wcs.all_world2pix(
        [[target['ra'], target['decl']]], 0)[0]

    # Clean out the references:
    hsize = 10
    x = references['pixel_column']
    y = references['pixel_row']
    references = references[
        (np.sqrt((x - target_pixel_pos[0])**2 +
                 (y - target_pixel_pos[1])**2) > ref_target_dist_limit)
        & (references[ref_filter] < ref_mag_limit)
        & (x > hsize) & (x < (image.shape[1] - 1 - hsize))
        & (y > hsize) & (y < (image.shape[0] - 1 - hsize))]

    #==============================================================================================
    # BARYCENTRIC CORRECTION OF TIME
    #==============================================================================================

    ltt_bary = image.obstime.light_travel_time(target_coord, ephemeris='jpl')
    image.obstime = image.obstime.tdb + ltt_bary

    #==============================================================================================
    # BACKGROUND ESTIMATION
    #==============================================================================================

    fig, ax = plt.subplots(1, 2, figsize=(20, 18))
    plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image')
    plot_image(image.mask,
               ax=ax[1],
               scale='linear',
               cbar='right',
               title='Mask')
    fig.savefig(os.path.join(output_folder, 'original.png'),
                bbox_inches='tight')
    plt.close(fig)

    # Estimate image background:
    # Not using image.clean here, since we are redefining the mask anyway
    bkg = Background2D(
        image.clean,
        (128, 128),
        filter_size=(5, 5),
        #mask=image.mask | (image.clean > background_cutoff),
        sigma_clip=SigmaClip(sigma=3.0),
        bkg_estimator=SExtractorBackground(),
        exclude_percentile=50.0)
    image.background = bkg.background

    # Create background-subtracted image:
    image.subclean = image.clean - image.background

    # Plot background estimation:
    fig, ax = plt.subplots(1, 3, figsize=(20, 6))
    plot_image(image.clean, ax=ax[0], scale='log', title='Original')
    plot_image(image.background, ax=ax[1], scale='log', title='Background')
    plot_image(image.subclean,
               ax=ax[2],
               scale='log',
               title='Background subtracted')
    fig.savefig(os.path.join(output_folder, 'background.png'),
                bbox_inches='tight')
    plt.close(fig)

    # TODO: Is this correct?!
    image.error = calc_total_error(image.clean, bkg.background_rms, 1.0)

    #==============================================================================================
    # DETECTION OF STARS AND MATCHING WITH CATALOG
    #==============================================================================================

    logger.info("References:\n%s", references)

    radius = 10
    fwhm_guess = 6.0
    fwhm_min = 3.5
    fwhm_max = 13.5

    # Extract stars sub-images:
    #stars = extract_stars(
    #	NDData(data=image.subclean, mask=image.mask),
    #	stars_for_epsf,
    #	size=size
    #)

    # Set up 2D Gaussian model for fitting to reference stars:
    g2d = models.Gaussian2D(amplitude=1.0,
                            x_mean=radius,
                            y_mean=radius,
                            x_stddev=fwhm_guess * gaussian_fwhm_to_sigma)
    g2d.amplitude.bounds = (0.1, 2.0)
    g2d.x_mean.bounds = (0.5 * radius, 1.5 * radius)
    g2d.y_mean.bounds = (0.5 * radius, 1.5 * radius)
    g2d.x_stddev.bounds = (fwhm_min * gaussian_fwhm_to_sigma,
                           fwhm_max * gaussian_fwhm_to_sigma)
    g2d.y_stddev.tied = lambda model: model.x_stddev
    g2d.theta.fixed = True

    gfitter = fitting.LevMarLSQFitter()

    fwhms = np.full(len(references), np.NaN)
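    # Fit the Gaussian to a small normalised cutout around each reference star and
    # record the fitted FWHM; unphysical values are rejected below and a sigma-clipped
    # mean is used as the image FWHM.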
    for i, (x, y) in enumerate(
            zip(references['pixel_column'], references['pixel_row'])):
        x = int(np.round(x))
        y = int(np.round(y))
        x0, y0, width, height = x - radius, y - radius, 2 * radius, 2 * radius
        cutout = slice(y0 - 1, y0 + height), slice(x0 - 1, x0 + width)

        curr_star = image.subclean[cutout] / np.max(image.subclean[cutout])
        npix = len(curr_star)

        ypos, xpos = np.mgrid[:npix, :npix]
        gfit = gfitter(g2d, x=xpos, y=ypos, z=curr_star)

        fwhms[i] = gfit.x_fwhm

    mask = ~np.isfinite(fwhms) | (fwhms <= fwhm_min) | (fwhms >= fwhm_max)
    masked_fwhms = np.ma.masked_array(fwhms, mask)

    fwhm = np.mean(sigma_clip(masked_fwhms, maxiters=20, sigma=2.0))
    logger.info("FWHM: %f", fwhm)

    # Use DAOStarFinder to search the image for stars, and only use reference stars where a
    # star was actually detected close to the reference-star coordinate:
    cleanout_references = (len(references) > 50)

    if cleanout_references:
        daofind_tbl = DAOStarFinder(100, fwhm=fwhm, roundlo=-0.5,
                                    roundhi=0.5).find_stars(image.subclean,
                                                            mask=image.mask)
        indx_good = np.zeros(len(references), dtype='bool')
        for k, ref in enumerate(references):
            dist = np.sqrt((daofind_tbl['xcentroid'] -
                            ref['pixel_column'])**2 +
                           (daofind_tbl['ycentroid'] - ref['pixel_row'])**2)
            if np.any(dist <= fwhm / 4):  # Cutoff set somewhat arbitrary
                indx_good[k] = True

        references = references[indx_good]

    fig, ax = plt.subplots(1, 1, figsize=(20, 18))
    plot_image(image.subclean,
               ax=ax,
               scale='log',
               cbar='right',
               title=target_name)
    ax.scatter(references['pixel_column'],
               references['pixel_row'],
               c='r',
               alpha=0.3)
    if cleanout_references:
        ax.scatter(daofind_tbl['xcentroid'],
                   daofind_tbl['ycentroid'],
                   c='g',
                   alpha=0.3)
    ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', c='r')
    fig.savefig(os.path.join(output_folder, 'positions.png'),
                bbox_inches='tight')
    plt.close(fig)

    #==============================================================================================
    # CREATE EFFECTIVE PSF MODEL
    #==============================================================================================

    # Make cutouts of stars using extract_stars:
    # Scales with FWHM
    size = int(np.round(29 * fwhm / 6))
    if size % 2 == 0:
        size += 1  # Make sure it's an odd number
    size = max(size, 15)  # Never go below 15 pixels
    hsize = (size - 1) / 2

    x = references['pixel_column']
    y = references['pixel_row']
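    # Only keep stars far enough from the image edges that a full (size x size)
    # cutout can be extracted around them: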
    mask_near_edge = ((x > hsize) & (x < (image.shape[1] - 1 - hsize))
                      & (y > hsize) & (y < (image.shape[0] - 1 - hsize)))

    stars_for_epsf = Table()
    stars_for_epsf['x'] = x[mask_near_edge]
    stars_for_epsf['y'] = y[mask_near_edge]

    # Store which stars were used in ePSF in the table:
    logger.info("Number of stars used for ePSF: %d", len(stars_for_epsf))
    references['used_for_epsf'] = mask_near_edge

    # Extract stars sub-images:
    stars = extract_stars(NDData(data=image.subclean, mask=image.mask),
                          stars_for_epsf,
                          size=size)

    # Plot the stars being used for ePSF:
    nrows = 5
    ncols = 5
    imgnr = 0
    for k in range(int(np.ceil(len(stars_for_epsf) / (nrows * ncols)))):
        fig, ax = plt.subplots(nrows=nrows,
                               ncols=ncols,
                               figsize=(20, 20),
                               squeeze=True)
        ax = ax.ravel()
        for i in range(nrows * ncols):
            if imgnr > len(stars_for_epsf) - 1:
                ax[i].axis('off')
            else:
                plot_image(stars[imgnr], ax=ax[i], scale='log', cmap='viridis')
            imgnr += 1

        fig.savefig(os.path.join(output_folder,
                                 'epsf_stars%02d.png' % (k + 1)),
                    bbox_inches='tight')
        plt.close(fig)

    # Build the ePSF:
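    # EPSFBuilder iteratively builds the effective PSF from the star cutouts at the
    # native pixel resolution (oversampling=1); it returns (epsf, fitted_stars),
    # so only the model itself is kept here.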
    epsf = EPSFBuilder(oversampling=1.0,
                       maxiters=500,
                       fitter=EPSFFitter(fit_boxsize=2 * fwhm),
                       progress_bar=True)(stars)[0]

    logger.info('Successfully built PSF model')

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15))
    plot_image(epsf.data, ax=ax1, cmap='viridis')

    fwhms = []
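    # Measure the FWHM of the ePSF along each axis by collapsing it to a 1D profile
    # and finding where a spline through the profile crosses half of its peak value: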
    for a, ax in ((0, ax3), (1, ax2)):
        # Collapse the PSF along this axis:
        profile = epsf.data.sum(axis=a)
        itop = profile.argmax()
        poffset = profile[itop] / 2

        # Run a spline through the points, subtracting half of the peak value, and find the roots.
        # A cubic spline is required here, since UnivariateSpline.roots() only supports cubic splines.
        profile_intp = UnivariateSpline(np.arange(0, len(profile)),
                                        profile - poffset,
                                        k=3,
                                        s=0,
                                        ext=3)
        lr = profile_intp.roots()
        axis_fwhm = lr[1] - lr[0]

        fwhms.append(axis_fwhm)

        x_fine = np.linspace(-0.5, len(profile) - 0.5, 500)

        ax.plot(profile, 'k.-')
        ax.plot(x_fine, profile_intp(x_fine) + poffset, 'g-')
        ax.axvline(itop)
        ax.axvspan(lr[0], lr[1], facecolor='g', alpha=0.2)
        ax.set_xlim(-0.5, len(profile) - 0.5)

    # Let's make the final FWHM the largest one we found:
    fwhm = np.max(fwhms)
    logger.info("Final FWHM based on ePSF: %f", fwhm)

    #ax2.axvspan(itop - fwhm/2, itop + fwhm/2, facecolor='b', alpha=0.2)
    #ax3.axvspan(itop - fwhm/2, itop + fwhm/2, facecolor='b', alpha=0.2)
    ax4.axis('off')
    fig.savefig(os.path.join(output_folder, 'epsf.png'), bbox_inches='tight')
    plt.close(fig)

    #==============================================================================================
    # COORDINATES TO DO PHOTOMETRY AT
    #==============================================================================================

    coordinates = np.array([[ref['pixel_column'], ref['pixel_row']]
                            for ref in references])

    # Add the main target position as the first entry:
    if datafile.get('template') is None:
        coordinates = np.concatenate(([target_pixel_pos], coordinates), axis=0)

    #==============================================================================================
    # APERTURE PHOTOMETRY
    #==============================================================================================

    # Define apertures for aperture photometry:
    apertures = CircularAperture(coordinates, r=fwhm)
    annuli = CircularAnnulus(coordinates, r_in=1.5 * fwhm, r_out=2.5 * fwhm)
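    # The annulus sum is used further down to estimate and subtract the local sky
    # background from the aperture sum.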

    apphot_tbl = aperture_photometry(image.subclean, [apertures, annuli],
                                     mask=image.mask,
                                     error=image.error)

    logger.debug("Aperture Photometry Table:\n%s", apphot_tbl)
    logger.info('Aperture Photometry Success')

    #==============================================================================================
    # PSF PHOTOMETRY
    #==============================================================================================

    # Are we fixing the positions?
    epsf.fixed.update({'x_0': False, 'y_0': False})

    # Create photometry object:
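    # BasicPSFPhotometry groups sources closer than one FWHM (DAOGroup) so that
    # overlapping stars are fitted simultaneously, fits the ePSF model in boxes of
    # fitshape pixels, and uses an aperture of radius FWHM for initial flux guesses.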
    photometry = BasicPSFPhotometry(group_maker=DAOGroup(fwhm),
                                    bkg_estimator=SExtractorBackground(),
                                    psf_model=epsf,
                                    fitter=fitting.LevMarLSQFitter(),
                                    fitshape=size,
                                    aperture_radius=fwhm)

    psfphot_tbl = photometry(image=image.subclean,
                             init_guesses=Table(coordinates,
                                                names=['x_0', 'y_0']))

    logger.debug("PSF Photometry Table:\n%s", psfphot_tbl)
    logger.info('PSF Photometry Success')

    #==============================================================================================
    # TEMPLATE SUBTRACTION AND TARGET PHOTOMETRY
    #==============================================================================================

    if datafile.get('template') is not None:
        # Find the pixel-scale of the science image:
        pixel_area = proj_plane_pixel_area(image.wcs.celestial)
        pixel_scale = np.sqrt(pixel_area) * 3600  # arcsec/pixel
        # print(image.wcs.celestial.cunit)  # Doesn't work?
        logger.info("Science image pixel scale: %f", pixel_scale)

        # Run the template subtraction, and get back
        # the science image where the template has been subtracted:
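        # run_imagematch (defined in the next example below) calls the external
        # ImageMatch program to kernel-match and subtract the template.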
        diffimage = run_imagematch(datafile,
                                   target,
                                   star_coord=coordinates,
                                   fwhm=fwhm,
                                   pixel_scale=pixel_scale)

        # Include mask from original image:
        diffimage = np.ma.masked_array(diffimage, image.mask)

        # Create apertures around the target:
        apertures = CircularAperture(target_pixel_pos, r=fwhm)
        annuli = CircularAnnulus(target_pixel_pos,
                                 r_in=1.5 * fwhm,
                                 r_out=2.5 * fwhm)

        # Create two plots of the difference image:
        fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20))
        plot_image(diffimage, ax=ax, cbar='right', title=target_name)
        ax.plot(target_pixel_pos[0],
                target_pixel_pos[1],
                marker='+',
                color='r')
        fig.savefig(os.path.join(output_folder, 'diffimg.png'),
                    bbox_inches='tight')
        apertures.plot(color='r')
        annuli.plot(color='k')
        ax.set_xlim(target_pixel_pos[0] - 50, target_pixel_pos[0] + 50)
        ax.set_ylim(target_pixel_pos[1] - 50, target_pixel_pos[1] + 50)
        fig.savefig(os.path.join(output_folder, 'diffimg_zoom.png'),
                    bbox_inches='tight')
        plt.close(fig)

        # Run aperture photometry on subtracted image:
        target_apphot_tbl = aperture_photometry(diffimage, [apertures, annuli],
                                                mask=image.mask,
                                                error=image.error)

        # Run PSF photometry on template subtracted image:
        target_psfphot_tbl = photometry(diffimage,
                                        init_guesses=Table(
                                            target_pixel_pos,
                                            names=['x_0', 'y_0']))

        # Combine the output tables from the target and the reference stars into one:
        apphot_tbl = vstack([target_apphot_tbl, apphot_tbl], join_type='exact')
        psfphot_tbl = vstack([target_psfphot_tbl, psfphot_tbl],
                             join_type='exact')

    # Build results table:
    tab = references.copy()
    tab.insert_row(
        0, {
            'starid': 0,
            'ra': target['ra'],
            'decl': target['decl'],
            'pixel_column': target_pixel_pos[0],
            'pixel_row': target_pixel_pos[1]
        })
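    # Fill the catalog magnitude and proper-motion columns of the target row with NaN: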
    for key in ('pm_ra', 'pm_dec', 'gaia_mag', 'gaia_bp_mag', 'gaia_rp_mag',
                'H_mag', 'J_mag', 'K_mag', 'g_mag', 'r_mag', 'i_mag', 'z_mag'):
        tab[0][key] = np.NaN

    # Subtract background estimated from annuli:
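    # The mean sky per pixel, estimated from the annulus, is scaled by the aperture
    # area before being subtracted from the aperture sum.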
    flux_aperture = apphot_tbl['aperture_sum_0'] - (
        apphot_tbl['aperture_sum_1'] / annuli.area()) * apertures.area()
    flux_aperture_error = np.sqrt(apphot_tbl['aperture_sum_err_0']**2 +
                                  (apphot_tbl['aperture_sum_err_1'] /
                                   annuli.area() * apertures.area())**2)

    # Add table columns with results:
    tab['flux_aperture'] = flux_aperture / image.exptime
    tab['flux_aperture_error'] = flux_aperture_error / image.exptime
    tab['flux_psf'] = psfphot_tbl['flux_fit'] / image.exptime
    tab['flux_psf_error'] = psfphot_tbl['flux_unc'] / image.exptime
    tab['pixel_column_psf_fit'] = psfphot_tbl['x_fit']
    tab['pixel_row_psf_fit'] = psfphot_tbl['y_fit']
    tab['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc']
    tab['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc']

    # Check that we got valid photometry:
    if not np.isfinite(tab[0]['flux_psf']) or not np.isfinite(
            tab[0]['flux_psf_error']):
        raise Exception("Target magnitude is undefined.")

    #==============================================================================================
    # CALIBRATE
    #==============================================================================================

    # Convert PSF fluxes to magnitudes:
    mag_inst = -2.5 * np.log10(tab['flux_psf'])
    mag_inst_err = (2.5 / np.log(10)) * (tab['flux_psf_error'] /
                                         tab['flux_psf'])

    # Corresponding magnitudes in catalog:
    mag_catalog = tab[ref_filter]

    # Mask out things that should not be used in calibration:
    use_for_calibration = np.ones_like(mag_catalog, dtype='bool')
    use_for_calibration[0] = False  # Do not use target for calibration
    use_for_calibration[~np.isfinite(mag_inst)
                        | ~np.isfinite(mag_catalog)] = False

    # Just creating some short-hands:
    x = mag_catalog[use_for_calibration]
    y = mag_inst[use_for_calibration]
    yerr = mag_inst_err[use_for_calibration]

    # Fit linear function with fixed slope, using sigma-clipping:
    model = models.Linear1D(slope=1, fixed={'slope': True})
    fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),
                                               sigma_clip,
                                               sigma=3.0)
    best_fit, sigma_clipped = fitter(model, x, y, weights=1.0 / yerr**2)
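    # With the slope fixed at one, the fitted intercept is the mean offset between
    # instrumental and catalog magnitudes, so the zero-point is minus the intercept.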

    # Extract zero-point and estimate its error:
    # I don't know why there is not an error-estimate attached directly to the Parameter?
    zp = -1 * best_fit.intercept.value  # Negative, because that is the way zeropoints are usually defined
    zp_error = nanstd(y[~sigma_clipped] - best_fit(x[~sigma_clipped]))

    # Add calibrated magnitudes to the photometry table:
    tab['mag'] = mag_inst + zp
    tab['mag_error'] = np.sqrt(mag_inst_err**2 + zp_error**2)

    fig, ax = plt.subplots(1, 1)
    ax.errorbar(x, y, yerr=yerr, fmt='k.')
    ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r')
    ax.plot(x, best_fit(x), color='g', linewidth=3)
    ax.set_xlabel('Catalog magnitude')
    ax.set_ylabel('Instrumental magnitude')
    fig.savefig(os.path.join(output_folder, 'calibration.png'),
                bbox_inches='tight')
    plt.close(fig)

    #==============================================================================================
    # SAVE PHOTOMETRY
    #==============================================================================================

    # Units of columns:
    tab['flux_aperture'].unit = u.count / u.second
    tab['flux_aperture_error'].unit = u.count / u.second
    tab['flux_psf'].unit = u.count / u.second
    tab['flux_psf_error'].unit = u.count / u.second
    tab['pixel_column'].unit = u.pixel
    tab['pixel_row'].unit = u.pixel
    tab['pixel_column_psf_fit'].unit = u.pixel
    tab['pixel_row_psf_fit'].unit = u.pixel
    tab['pixel_column_psf_fit_error'].unit = u.pixel
    tab['pixel_row_psf_fit_error'].unit = u.pixel

    # Meta-data:
    tab.meta['version'] = __version__
    tab.meta['fileid'] = fileid
    tab.meta['template'] = None if datafile.get(
        'template') is None else datafile['template']['fileid']
    tab.meta['photfilter'] = photfilter
    tab.meta['fwhm'] = fwhm
    tab.meta['obstime-bmjd'] = float(image.obstime.mjd)
    tab.meta['zp'] = zp
    tab.meta['zp_error'] = zp_error

    # Filepath where to save photometry:
    photometry_output = os.path.join(output_folder, 'photometry.ecsv')

    # Write the final table to file:
    tab.write(photometry_output,
              format='ascii.ecsv',
              delimiter=',',
              overwrite=True)

    toc = default_timer()
    logger.info("Photometry took: %f seconds", toc - tic)

    return photometry_output
Example #28
def run_imagematch(datafile, target=None, star_coord=None, fwhm=None, pixel_scale=None):
	"""

	.. codeauthor:: Rasmus Handberg <*****@*****.**>
	"""

	logger = logging.getLogger(__name__)

	if datafile.get('template') is None:
		raise ValueError("DATAFILE input does not specify a template to use.")

	# Extract paths to science and reference images:
	reference_image = os.path.join(datafile['archive_path'], datafile['template']['path'])
	science_image = os.path.join(datafile['archive_path'], datafile['path'])

	# If the target was not provided in the function call,
	# use the API to get the target information:
	if target is None:
		catalog = api.get_catalog(datafile['targetid'], output='table')
		target = catalog['target'][0]

	# Find the path to where the ImageMatch program is installed.
	# This is to avoid problems with it not being on the user's PATH,
	# or with the user using some other version of the python executable.
	# TODO: There must be a better way of doing this!
	imgmatch = os.path.join(get_setuptools_script_dir(), 'ImageMatch')
	if os.name == "nt":
		out = subprocess.check_output(["where", "ImageMatch"], universal_newlines=True)
		imgmatch = out.strip()
	else:
		out = subprocess.check_output(["whereis", "ImageMatch"], universal_newlines=True)
		out = re.match('ImageMatch: (.+)', out.strip())
		imgmatch = out.group(1)

	if not os.path.isfile(imgmatch):
		raise FileNotFoundError("ImageMatch not found")

	# Find the ImageMatch config file to use based on the site of the observations:
	__dir__ = os.path.dirname(os.path.abspath(__file__))
	if datafile['site'] in (1,3,4,6):
		config_file = os.path.join(__dir__, 'imagematch', 'imagematch_lcogt.cfg')
	elif datafile['site'] == 2:
		config_file = os.path.join(__dir__, 'imagematch', 'imagematch_hawki.cfg')
	elif datafile['site'] == 5:
		config_file = os.path.join(__dir__, 'imagematch', 'imagematch_alfosc.cfg')
	else:
		config_file = os.path.join(__dir__, 'imagematch', 'imagematch_default.cfg')
	if not os.path.isfile(config_file):
		raise FileNotFoundError(config_file)

	if pixel_scale is None:
		if datafile['site'] in (1,3,4,6):
			# LCOGT provides the pixel scale directly in the header
			pixel_scale = 'PIXSCALE'
		else:
			image = load_image(science_image)
			pixel_area = proj_plane_pixel_area(image.wcs)
			pixel_scale = np.sqrt(pixel_area)*3600 # arcsec/pixel
			logger.info("Calculated science image pixel scale: %f", pixel_scale)

	if datafile['template']['site'] in (1,3,4,6):
		# LCOGT provides the pixel scale directly in the header
		mscale = 'PIXSCALE'
	else:
		template = load_image(reference_image)
		template_pixel_area = proj_plane_pixel_area(template.wcs.celestial)
		mscale = np.sqrt(template_pixel_area)*3600 # arcsec/pixel
		logger.info("Calculated template pixel scale: %f", mscale)

	# Scale kernel radius with FWHM:
	if fwhm is None:
		kernel_radius = 9
	else:
		kernel_radius = max(9, int(np.ceil(1.5*fwhm)))
		if kernel_radius % 2 == 0:
			kernel_radius += 1

	# We will work in a temporary directory, since ImageMatch produces
	# a lot of extra output files that we don't want to have lying around
	# after it completes
	with tempfile.TemporaryDirectory() as tmpdir:

		# Copy the science and reference image to the temp dir:
		shutil.copy(reference_image, tmpdir)
		shutil.copy(science_image, tmpdir)

		# Construct the command to run ImageMatch:
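		# Try progressively looser object-matching thresholds (-s 3, 5, 7) until ImageMatch succeeds: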
		for match_threshold in (3.0, 5.0, 7.0):
			cmd = '"{python:s}" "{imgmatch:s}" -cfg "{config_file:s}" -snx {target_ra:.10f}d -sny {target_dec:.10f}d -p {kernel_radius:d} -s {match:f} -scale {pixel_scale:} -mscale {mscale:} -m "{reference_image:s}" "{science_image:s}"'.format(
				python=sys.executable,
				imgmatch=imgmatch,
				config_file=config_file,
				reference_image=os.path.basename(reference_image),
				science_image=os.path.basename(science_image),
				target_ra=target['ra'],
				target_dec=target['decl'],
				match=match_threshold,
				kernel_radius=kernel_radius,
				pixel_scale=pixel_scale,
				mscale=mscale
			)
			logger.info("Executing command: %s", cmd)

			# Run the command in a subprocess:
			cmd = shlex.split(cmd)
			proc = subprocess.Popen(cmd,
				cwd=tmpdir,
				stdout=subprocess.PIPE,
				stderr=subprocess.PIPE,
				universal_newlines=True)
			stdout_data, stderr_data = proc.communicate()

			# Check the outputs from the subprocess:
			logger.info("Return code: %d", proc.returncode)
			logger.info("STDOUT:\n%s", stdout_data.strip())
			if stderr_data.strip() != '':
				logger.error("STDERR:\n%s", stderr_data.strip())
			if proc.returncode < 0:
				raise Exception("ImageMatch failed. Processed killed by OS with returncode %d." % proc.returncode)
			elif 'Failed object match... giving up.' in stdout_data:
				#raise Exception("ImageMatch giving up matching objects")
				continue
			elif proc.returncode > 0:
				raise Exception("ImageMatch failed.")

			# Load the resulting difference image into memory:
			diffimg_path = os.path.join(tmpdir, os.path.splitext(os.path.basename(science_image))[0] + 'diff.fits')
			if not os.path.isfile(diffimg_path):
				raise FileNotFoundError(diffimg_path)

			break

		with fits.open(diffimg_path, mode='readonly') as hdu:
			diffimg = np.asarray(hdu[0].data)

	return diffimg