Example no. 1
def img_data(file_name):
    """Load common image files into a Glue data object"""
    result = Data()

    data = img_loader(file_name)
    data = np.flipud(data)
    shp = data.shape

    comps = []
    labels = []

    # split multi-channel (RGB/RGBA) images into individual color planes
    if len(shp) == 3 and shp[2] in [3, 4]:
        comps.extend([data[:, :, 0], data[:, :, 1], data[:, :, 2]])
        labels.extend(["red", "green", "blue"])
        if shp[2] == 4:
            comps.append(data[:, :, 3])
            labels.append("alpha")
    else:
        comps = [data]
        labels = ["PRIMARY"]

    # look for AVM coordinate metadata
    try:
        from pyavm import AVM

        avm = AVM(str(file_name))  # avoid unicode
        wcs = avm.to_wcs()
    except Exception:
        pass
    else:
        result.coords = coordinates_from_wcs(wcs)

    for c, l in zip(comps, labels):
        result.add_component(c, l)

    return result
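
A minimal usage sketch for the loader above, assuming it lives in a module that already provides img_loader, Data, and coordinates_from_wcs (in Glue these come from glue.core and glue.core.coordinates); the PNG file name is only a placeholder:

# Hypothetical call to the img_data() loader defined above
d = img_data('galaxy_rgb.png')   # an AVM-tagged PNG, for illustration

print(d.components)              # the red/green/blue (and alpha) components
if getattr(d, 'coords', None) is not None:
    print('AVM metadata found; WCS coordinates attached to the Data object')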
Example no. 2
def img_data(file_name):
    """Load common image files into a Glue data object"""
    result = Data()

    data = img_loader(file_name)
    data = np.flipud(data)
    shp = data.shape

    comps = []
    labels = []

    # split multi-channel (RGB/RGBA) images into individual color planes
    if len(shp) == 3 and shp[2] in [3, 4]:
        comps.extend([data[:, :, 0], data[:, :, 1], data[:, :, 2]])
        labels.extend(['red', 'green', 'blue'])
        if shp[2] == 4:
            comps.append(data[:, :, 3])
            labels.append('alpha')
    else:
        comps = [data]
        labels = ['PRIMARY']

    # look for AVM coordinate metadata
    try:
        from pyavm import AVM
        avm = AVM(str(file_name))  # avoid unicode
        wcs = avm.to_wcs()
    except Exception:
        pass
    else:
        result.coords = coordinates_from_wcs(wcs)

    for c, l in zip(comps, labels):
        result.add_component(c, l)

    return result
Example no. 3
File: rgb.py Project: EDrabek/aplpy
def make_rgb_image(data, output, indices=(0, 1, 2), \
                   vmin_r=None, vmax_r=None, pmin_r=0.25, pmax_r=99.75, \
                   stretch_r='linear', vmid_r=None, exponent_r=2, \
                   vmin_g=None, vmax_g=None, pmin_g=0.25, pmax_g=99.75, \
                   stretch_g='linear', vmid_g=None, exponent_g=2, \
                   vmin_b=None, vmax_b=None, pmin_b=0.25, pmax_b=99.75, \
                   stretch_b='linear', vmid_b=None, exponent_b=2, \
                   embed_avm_tags=True):
    '''
    Make an RGB image from a FITS RGB cube or from three FITS files.

    Parameters
    ----------

    data : str or tuple or list
        If a string, this is the filename of an RGB FITS cube. If a tuple
        or list, this should give the filename of three files to use for
        the red, green, and blue channel.

    output : str
        The output filename. The image type (e.g. PNG, JPEG, TIFF, ...)
        will be determined from the extension. Any image type supported by
        the Python Imaging Library can be used.

    indices : tuple, optional
        If data is the filename of a FITS cube, these indices are the
        positions in the third dimension to use for red, green, and
        blue respectively. The default is to use the first three
        indices.

    vmin_r, vmin_g, vmin_b : float, optional
        Minimum pixel value to use for the red, green, and blue channels.
        If set to None for a given channel, the minimum pixel value for
        that channel is determined using the corresponding pmin_x argument
        (default).

    vmax_r, vmax_g, vmax_b : float, optional
        Maximum pixel value to use for the red, green, and blue channels.
        If set to None for a given channel, the maximum pixel value for
        that channel is determined using the corresponding pmax_x argument
        (default).

    pmin_r, pmin_g, pmin_b : float, optional
        Percentile values used to determine for a given channel the
        minimum pixel value to use for that channel if the corresponding
        vmin_x is set to None. The default is 0.25% for all channels.

    pmax_r, pmax_g, pmax_b : float, optional
        Percentile values used to determine for a given channel the
        maximum pixel value to use for that channel if the corresponding
        vmax_x is set to None. The default is 99.75% for all channels.

    stretch_r, stretch_g, stretch_b : { 'linear', 'log', 'sqrt', 'arcsinh', 'power' }
        The stretch function to use for the different channels.

    vmid_r, vmid_g, vmid_b : float, optional
        Baseline values used for the log and arcsinh stretches. If
        set to None, this is set to zero for log stretches and to
        vmin - (vmax - vmin) / 30. for arcsinh stretches.

    exponent_r, exponent_g, exponent_b : float, optional
        If stretch_x is set to 'power', this is the exponent to use.

    embed_avm_tags : bool, optional
        Whether to embed AVM tags inside the image - this can only be done for
        JPEG and PNG files, and only if PyAVM is installed.
    '''

    try:
        from PIL import Image
    except ImportError:
        try:
            import Image
        except ImportError:
            raise ImportError("The Python Imaging Library (PIL) is required to make an RGB image")

    if isinstance(data, basestring):

        image = fits.getdata(data)
        image_r = image[indices[0], :, :]
        image_g = image[indices[1], :, :]
        image_b = image[indices[2], :, :]

        # Read in header
        header = fits.getheader(data)

        # Remove information about third dimension
        header['NAXIS'] = 2
        for key in ['NAXIS', 'CTYPE', 'CRPIX', 'CRVAL', 'CUNIT', 'CDELT', 'CROTA']:
            for coord in range(3, 6):
                name = key + str(coord)
                if name in header:
                    del header[name]

    elif (type(data) == list or type(data) == tuple) and len(data) == 3:

        filename_r, filename_g, filename_b = data
        image_r = fits.getdata(filename_r)
        image_g = fits.getdata(filename_g)
        image_b = fits.getdata(filename_b)

        # Read in header
        header = fits.getheader(filename_r)

    else:
        raise Exception("data should either be the filename of a FITS cube or a list/tuple of three images")

    log.info("Red:")
    image_r = Image.fromarray(_data_stretch(image_r, \
                                            vmin=vmin_r, vmax=vmax_r, \
                                            pmin=pmin_r, pmax=pmax_r, \
                                            stretch=stretch_r, \
                                            vmid=vmid_r, \
                                            exponent=exponent_r))

    log.info("Green:")
    image_g = Image.fromarray(_data_stretch(image_g, \
                                            vmin=vmin_g, vmax=vmax_g, \
                                            pmin=pmin_g, pmax=pmax_g, \
                                            stretch=stretch_g, \
                                            vmid=vmid_g, \
                                            exponent=exponent_g))

    log.info("Blue:")
    image_b = Image.fromarray(_data_stretch(image_b, \
                                            vmin=vmin_b, vmax=vmax_b, \
                                            pmin=pmin_b, pmax=pmax_b, \
                                            stretch=stretch_b, \
                                            vmid=vmid_b, \
                                            exponent=exponent_b))

    img = Image.merge("RGB", (image_r, image_g, image_b))
    img = img.transpose(Image.FLIP_TOP_BOTTOM)

    img.save(output)

    if embed_avm_tags:

        try:
            import pyavm
        except ImportError:
            warnings.warn("PyAVM 0.9.1 or later is not installed, so AVM tags will not be embedded in RGB image")
            return

        if version.LooseVersion(pyavm.__version__) < version.LooseVersion('0.9.1'):
            warnings.warn("PyAVM 0.9.1 or later is not installed, so AVM tags will not be embedded in RGB image")
            return

        from pyavm import AVM

        if output.lower().endswith(('.jpg', '.jpeg', '.png')):
            avm = AVM.from_header(header)
            avm.embed(output, output)
        else:
            warnings.warn("AVM tags will not be embedded in RGB image, as only JPEG and PNG files are supported")
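
A hedged usage sketch for the function above; the file names, stretches, and percentiles are placeholders that simply illustrate the keyword arguments. With embed_avm_tags=True and PyAVM >= 0.9.1 installed, the WCS from the red channel's header is written into the output as AVM tags (JPEG/PNG only):

import aplpy

# Hypothetical input files: any three aligned FITS images work.
aplpy.make_rgb_image(['m17_red.fits', 'm17_green.fits', 'm17_blue.fits'],
                     'm17_rgb.png',
                     stretch_r='arcsinh', stretch_g='arcsinh', stretch_b='arcsinh',
                     pmax_r=99.5, pmax_g=99.5, pmax_b=99.5,
                     embed_avm_tags=True)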
Example no. 4
    print cat_ngfs['ID'][i], ' ', cat_ngfs['M_i'][i], ' ', cat_ngfs['mu_i'][i]

sys.exit()

n = np.int(np.ceil(np.sqrt(len(cat_ngfs))))
n_column = 6
n_row = np.int(np.ceil(len(cat_ngfs) * 1. / n_column))
fig = plt.figure(figsize=(14., 14. * n_row / n_column), dpi=300)
gs = gridspec.GridSpec(n_row, n_column)
gs.update(left=0.03, right=0.97, bottom=0.03, top=0.97, wspace=0.2, hspace=0.2)

# Now reading the HD rgb image

im_data = np.flipud(skimage.io.imread(im_rgb_hd_file))
im_size = im_data.shape
avm = AVM.from_image(im_rgb_hd_file)
w = avm.to_wcs()
w.naxis1 = im_size[1]
w.naxis2 = im_size[0]

# Sort them by magnitude
cat_ngfs.sort('m_i')
cat_ngfs_coo = SkyCoord(cat_ngfs['RA'], cat_ngfs['DEC'], unit="deg")

for i in np.arange(len(cat_ngfs)):
    print 'Processing NGFS dwarf ', cat_ngfs['ID'][i]

    ax = plt.subplot(gs[i])
    ax.set_aspect('equal')
    ax.axis('off')
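
The loop above crops postage stamps around catalogue sources using the WCS recovered from the JPEG's AVM tags. A minimal sketch of that coordinate step, with a hypothetical file name and position:

import numpy as np
import skimage.io
from pyavm import AVM

rgb_file = 'ngfs_tile1_rgb.jpg'                    # hypothetical AVM-tagged JPEG
im_data = np.flipud(skimage.io.imread(rgb_file))   # flipped vertically, as in the script above
w = AVM.from_image(rgb_file).to_wcs()

ra, dec = 54.621, -35.450                          # placeholder position in degrees
x, y = w.wcs_world2pix([[ra, dec]], 1)[0]          # 1-based pixel coordinates, as used above
print(int(round(x)), int(round(y)))                # pixel centre of the crop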
Example no. 5
def make_rgb_image(data, output, indices=(0, 1, 2), \
                   vmin_r=None, vmax_r=None, pmin_r=0.25, pmax_r=99.75, \
                   stretch_r='linear', vmid_r=None, exponent_r=2, \
                   vmin_g=None, vmax_g=None, pmin_g=0.25, pmax_g=99.75, \
                   stretch_g='linear', vmid_g=None, exponent_g=2, \
                   vmin_b=None, vmax_b=None, pmin_b=0.25, pmax_b=99.75, \
                   stretch_b='linear', vmid_b=None, exponent_b=2, \
                   embed_avm_tags=False):
    '''
    Make an RGB image from a FITS RGB cube or from three FITS files

    Required arguments:

        *data*: [ string | tuple | list ]
            If a string, this is the filename of an RGB FITS cube. If a tuple
            or list, this should give the filename of three files to use for
            the red, green, and blue channel.

        *output*: [ string ]
            The output filename. The image type (e.g. PNG, JPEG, TIFF, ...)
            will be determined from the extension. Any image type supported by
            the Python Imaging Library can be used.

    Optional keyword arguments:

        *indices*: [ tuple ]
            If data is the filename of a FITS cube, these indices are the
            positions in the third dimension to use for red, green, and
            blue respectively. The default is to use the first three
            indices.

        *vmin_r*: [ None | float ]

        *vmin_g*: [ None | float ]

        *vmin_b*: [ None | float ]

            Minimum pixel value to use for the red, green, and blue channels.
            If set to None for a given channel, the minimum pixel value for
            that channel is determined using the corresponding pmin_x argument
            (default).

        *vmax_r*: [ None | float ]

        *vmax_g*: [ None | float ]

        *vmax_b*: [ None | float ]

            Maximum pixel value to use for the red, green, and blue channels.
            If set to None for a given channel, the maximum pixel value for
            that channel is determined using the corresponding pmax_x argument
            (default).

        *pmin_r*: [ float ]

        *pmin_g*: [ float ]

        *pmin_b*: [ float ]

            Percentile values used to determine for a given channel the
            minimum pixel value to use for that channel if the corresponding
            vmin_x is set to None. The default is 0.25% for all channels.

        *pmax_r*: [ float ]

        *pmax_g*: [ float ]

        *pmax_b*: [ float ]

            Percentile values used to determine for a given channel the
            maximum pixel value to use for that channel if the corresponding
            vmax_x is set to None. The default is 99.75% for all channels.

        *stretch_r*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]

        *stretch_g*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]

        *stretch_b*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]

            The stretch function to use for the different channels.

        *vmid_r*: [ None | float ]

        *vmid_g*: [ None | float ]

        *vmid_b*: [ None | float ]

            Baseline values used for the log and arcsinh stretches. If
            set to None, this is set to zero for log stretches and to
            vmin - (vmax - vmin) / 30. for arcsinh stretches.

        *exponent_r*: [ float ]

        *exponent_g*: [ float ]

        *exponent_b*: [ float ]

            If stretch_x is set to 'power', this is the exponent to use.
        '''

    if not installed_pil:
        raise Exception(
            "The Python Imaging Library (PIL) is not installed but is required for this function"
        )

    if isinstance(data, basestring):

        image = pyfits.getdata(data)
        image_r = image[indices[0], :, :]
        image_g = image[indices[1], :, :]
        image_b = image[indices[2], :, :]

        # Read in header
        header = pyfits.getheader(data)

        # Remove information about third dimension
        header['NAXIS'] = 2
        for key in [
                'NAXIS', 'CTYPE', 'CRPIX', 'CRVAL', 'CUNIT', 'CDELT', 'CROTA'
        ]:
            for coord in range(3, 6):
                name = key + str(coord)
                if name in header:
                    del header[name]

    elif (type(data) == list or type(data) == tuple) and len(data) == 3:

        filename_r, filename_g, filename_b = data
        image_r = pyfits.getdata(filename_r)
        image_g = pyfits.getdata(filename_g)
        image_b = pyfits.getdata(filename_b)

        # Read in header
        header = pyfits.getheader(filename_r)

    else:
        raise Exception(
            "data should either be the filename of a FITS cube or a list/tuple of three images"
        )

    logger.info("Red:")
    image_r = Image.fromarray(_data_stretch(image_r, \
                                            vmin=vmin_r, vmax=vmax_r, \
                                            pmin=pmin_r, pmax=pmax_r, \
                                            stretch=stretch_r, \
                                            vmid=vmid_r, \
                                            exponent=exponent_r))

    logger.info("\nGreen:")
    image_g = Image.fromarray(_data_stretch(image_g, \
                                            vmin=vmin_g, vmax=vmax_g, \
                                            pmin=pmin_g, pmax=pmax_g, \
                                            stretch=stretch_g, \
                                            vmid=vmid_g, \
                                            exponent=exponent_g))

    logger.info("\nBlue:")
    image_b = Image.fromarray(_data_stretch(image_b, \
                                            vmin=vmin_b, vmax=vmax_b, \
                                            pmin=pmin_b, pmax=pmax_b, \
                                            stretch=stretch_b, \
                                            vmid=vmid_b, \
                                            exponent=exponent_b))

    img = Image.merge("RGB", (image_r, image_g, image_b))
    img = img.transpose(Image.FLIP_TOP_BOTTOM)

    img.save(output)

    if embed_avm_tags:
        if not avm_installed:
            raise Exception(
                "PyAVM needs to be installed in order to be able to embed AVM tags into the RGB image"
            )
        else:
            avm = AVM(header)
            avm.embed(output, output)

    return
Example no. 6
def make_rgb_image(data, output, indices=(0, 1, 2), \
                   vmin_r=None, vmax_r=None, pmin_r=0.25, pmax_r=99.75, \
                   stretch_r='linear', vmid_r=None, exponent_r=2, \
                   vmin_g=None, vmax_g=None, pmin_g=0.25, pmax_g=99.75, \
                   stretch_g='linear', vmid_g=None, exponent_g=2, \
                   vmin_b=None, vmax_b=None, pmin_b=0.25, pmax_b=99.75, \
                   stretch_b='linear', vmid_b=None, exponent_b=2, \
                   embed_avm_tags=False):
    '''
    Make an RGB image from a FITS RGB cube or from three FITS files

    Required arguments:

        *data*: [ string | tuple | list ]
            If a string, this is the filename of an RGB FITS cube. If a tuple
            or list, this should give the filename of three files to use for
            the red, green, and blue channel.

        *output*: [ string ]
            The output filename. The image type (e.g. PNG, JPEG, TIFF, ...)
            will be determined from the extension. Any image type supported by
            the Python Imaging Library can be used.

    Optional keyword arguments:

        *indices*: [ tuple ]
            If data is the filename of a FITS cube, these indices are the
            positions in the third dimension to use for red, green, and
            blue respectively. The default is to use the first three
            indices.

        *vmin_r*: [ None | float ]

        *vmin_g*: [ None | float ]

        *vmin_b*: [ None | float ]

            Minimum pixel value to use for the red, green, and blue channels.
            If set to None for a given channel, the minimum pixel value for
            that channel is determined using the corresponding pmin_x argument
            (default).

        *vmax_r*: [ None | float ]

        *vmax_g*: [ None | float ]

        *vmax_b*: [ None | float ]

            Maximum pixel value to use for the red, green, and blue channels.
            If set to None for a given channel, the maximum pixel value for
            that channel is determined using the corresponding pmax_x argument
            (default).

        *pmin_r*: [ float ]

        *pmin_g*: [ float ]

        *pmin_b*: [ float ]

            Percentile values used to determine for a given channel the
            minimum pixel value to use for that channel if the corresponding
            vmin_x is set to None. The default is 0.25% for all channels.

        *pmax_r*: [ float ]

        *pmax_g*: [ float ]

        *pmax_b*: [ float ]

            Percentile values used to determine for a given channel the
            maximum pixel value to use for that channel if the corresponding
            vmax_x is set to None. The default is 99.75% for all channels.

        *stretch_r*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]

        *stretch_g*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]

        *stretch_b*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ]

            The stretch function to use for the different channels.

        *vmid_r*: [ None | float ]

        *vmid_g*: [ None | float ]

        *vmid_b*: [ None | float ]

            Baseline values used for the log and arcsinh stretches. If
            set to None, this is set to zero for log stretches and to
            vmin - (vmax - vmin) / 30. for arcsinh stretches.

        *exponent_r*: [ float ]

        *exponent_g*: [ float ]

        *exponent_b*: [ float ]

            If stretch_x is set to 'power', this is the exponent to use.
        '''

    if not installed_pil:
        raise Exception("The Python Imaging Library (PIL) is not installed but is required for this function")

    if isinstance(data, basestring):

        image = pyfits.getdata(data)
        image_r = image[indices[0], :, :]
        image_g = image[indices[1], :, :]
        image_b = image[indices[2], :, :]

        # Read in header
        header = pyfits.getheader(data)

        # Remove information about third dimension
        header['NAXIS'] = 2
        for key in ['NAXIS', 'CTYPE', 'CRPIX', 'CRVAL', 'CUNIT', 'CDELT', 'CROTA']:
            for coord in range(3, 6):
                name = key + str(coord)
                if name in header:
                    del header[name]

    elif (type(data) == list or type(data) == tuple) and len(data) == 3:

        filename_r, filename_g, filename_b = data
        image_r = pyfits.getdata(filename_r)
        image_g = pyfits.getdata(filename_g)
        image_b = pyfits.getdata(filename_b)

        # Read in header
        header = pyfits.getheader(filename_r)

    else:
        raise Exception("data should either be the filename of a FITS cube or a list/tuple of three images")

    logger.info("Red:")
    image_r = Image.fromarray(_data_stretch(image_r, \
                                            vmin=vmin_r, vmax=vmax_r, \
                                            pmin=pmin_r, pmax=pmax_r, \
                                            stretch=stretch_r, \
                                            vmid=vmid_r, \
                                            exponent=exponent_r))

    logger.info("\nGreen:")
    image_g = Image.fromarray(_data_stretch(image_g, \
                                            vmin=vmin_g, vmax=vmax_g, \
                                            pmin=pmin_g, pmax=pmax_g, \
                                            stretch=stretch_g, \
                                            vmid=vmid_g, \
                                            exponent=exponent_g))

    logger.info("\nBlue:")
    image_b = Image.fromarray(_data_stretch(image_b, \
                                            vmin=vmin_b, vmax=vmax_b, \
                                            pmin=pmin_b, pmax=pmax_b, \
                                            stretch=stretch_b, \
                                            vmid=vmid_b, \
                                            exponent=exponent_b))

    img = Image.merge("RGB", (image_r, image_g, image_b))
    img = img.transpose(Image.FLIP_TOP_BOTTOM)

    img.save(output)

    if embed_avm_tags:
        if not avm_installed:
            raise Exception("PyAVM needs to be installed in order to be able to embed AVM tags into the RGB image")
        else:
            avm = AVM(header)
            avm.embed(output, output)

    return
Example no. 7
def make_rgb_image(data,
                   output,
                   indices=(0, 1, 2),
                   vmin_r=None,
                   vmax_r=None,
                   pmin_r=0.25,
                   pmax_r=99.75,
                   stretch_r='linear',
                   vmid_r=None,
                   exponent_r=2,
                   vmin_g=None,
                   vmax_g=None,
                   pmin_g=0.25,
                   pmax_g=99.75,
                   stretch_g='linear',
                   vmid_g=None,
                   exponent_g=2,
                   vmin_b=None,
                   vmax_b=None,
                   pmin_b=0.25,
                   pmax_b=99.75,
                   stretch_b='linear',
                   vmid_b=None,
                   exponent_b=2,
                   make_nans_transparent=False,
                   embed_avm_tags=True):
    """
    Make an RGB image from a FITS RGB cube or from three FITS files.

    Parameters
    ----------

    data : str or tuple or list
        If a string, this is the filename of an RGB FITS cube. If a tuple
        or list, this should give the filename of three files to use for
        the red, green, and blue channel.

    output : str
        The output filename. The image type (e.g. PNG, JPEG, TIFF, ...)
        will be determined from the extension. Any image type supported by
        the Python Imaging Library can be used.

    indices : tuple, optional
        If data is the filename of a FITS cube, these indices are the
        positions in the third dimension to use for red, green, and
        blue respectively. The default is to use the first three
        indices.

    vmin_r, vmin_g, vmin_b : float, optional
        Minimum pixel value to use for the red, green, and blue channels.
        If set to None for a given channel, the minimum pixel value for
        that channel is determined using the corresponding pmin_x argument
        (default).

    vmax_r, vmax_g, vmax_b : float, optional
        Maximum pixel value to use for the red, green, and blue channels.
        If set to None for a given channel, the maximum pixel value for
        that channel is determined using the corresponding pmax_x argument
        (default).

    pmin_r, pmin_g, pmin_b : float, optional
        Percentile values used to determine for a given channel the
        minimum pixel value to use for that channel if the corresponding
        vmin_x is set to None. The default is 0.25% for all channels.

    pmax_r, pmax_g, pmax_b : float, optional
        Percentile values used to determine for a given channel the
        maximum pixel value to use for that channel if the corresponding
        vmax_x is set to None. The default is 99.75% for all channels.

    stretch_r, stretch_g, stretch_b : { 'linear', 'log', 'sqrt', 'arcsinh', 'power' }
        The stretch function to use for the different channels.

    vmid_r, vmid_g, vmid_b : float, optional
        Baseline values used for the log and arcsinh stretches. If
        set to None, this is set to zero for log stretches and to
        vmin - (vmax - vmin) / 30. for arcsinh stretches.

    exponent_r, exponent_g, exponent_b : float, optional
        If stretch_x is set to 'power', this is the exponent to use.

    make_nans_transparent : bool, optional
        If set AND output is png, will add an alpha layer that sets pixels
        containing a NaN to transparent.

    embed_avm_tags : bool, optional
        Whether to embed AVM tags inside the image - this can only be done for
        JPEG and PNG files, and only if PyAVM is installed.
    """

    try:
        from PIL import Image
    except ImportError:
        try:
            import Image
        except ImportError:
            raise ImportError(
                "The Python Imaging Library (PIL) is required to make an RGB image"
            )

    if isinstance(data, six.string_types):

        image = fits.getdata(data)
        image_r = image[indices[0], :, :]
        image_g = image[indices[1], :, :]
        image_b = image[indices[2], :, :]

        # Read in header
        header = fits.getheader(data)

        # Remove information about third dimension
        header['NAXIS'] = 2
        for key in [
                'NAXIS', 'CTYPE', 'CRPIX', 'CRVAL', 'CUNIT', 'CDELT', 'CROTA'
        ]:
            for coord in range(3, 6):
                name = key + str(coord)
                if name in header:
                    del header[name]

    elif (type(data) == list or type(data) == tuple) and len(data) == 3:

        filename_r, filename_g, filename_b = data
        image_r = fits.getdata(filename_r)
        image_g = fits.getdata(filename_g)
        image_b = fits.getdata(filename_b)

        # Read in header
        header = fits.getheader(filename_r)

    else:
        raise Exception(
            "data should either be the filename of a FITS cube or a list/tuple of three images"
        )

    # are we making a transparent layer?
    do_alpha = make_nans_transparent and output.lower().endswith('.png')

    if do_alpha:
        log.info("Making alpha layer")

        # initialize alpha layer
        image_alpha = np.empty_like(image_r, dtype=np.uint8)
        image_alpha[:] = 255

        # look for nans in images
        for im in [image_r, image_g, image_b]:
            image_alpha[np.isnan(im)] = 0

    log.info("Red:")
    image_r = Image.fromarray(
        _data_stretch(image_r,
                      vmin=vmin_r,
                      vmax=vmax_r,
                      pmin=pmin_r,
                      pmax=pmax_r,
                      stretch=stretch_r,
                      vmid=vmid_r,
                      exponent=exponent_r))

    log.info("Green:")
    image_g = Image.fromarray(
        _data_stretch(image_g,
                      vmin=vmin_g,
                      vmax=vmax_g,
                      pmin=pmin_g,
                      pmax=pmax_g,
                      stretch=stretch_g,
                      vmid=vmid_g,
                      exponent=exponent_g))

    log.info("Blue:")
    image_b = Image.fromarray(
        _data_stretch(image_b,
                      vmin=vmin_b,
                      vmax=vmax_b,
                      pmin=pmin_b,
                      pmax=pmax_b,
                      stretch=stretch_b,
                      vmid=vmid_b,
                      exponent=exponent_b))

    img = Image.merge("RGB", (image_r, image_g, image_b))

    if do_alpha:
        # convert to RGBA and add alpha layer
        image_alpha = Image.fromarray(image_alpha)
        img = img.convert("RGBA")  # convert() returns a new image; keep the RGBA copy
        img.putalpha(image_alpha)

    img = img.transpose(Image.FLIP_TOP_BOTTOM)

    img.save(output)

    if embed_avm_tags:

        try:
            import pyavm
        except ImportError:
            warnings.warn(
                "PyAVM 0.9.1 or later is not installed, so AVM tags will not be embedded in RGB image"
            )
            return

        if version.LooseVersion(
                pyavm.__version__) < version.LooseVersion('0.9.1'):
            warnings.warn(
                "PyAVM 0.9.1 or later is not installed, so AVM tags will not be embedded in RGB image"
            )
            return

        from pyavm import AVM

        if output.lower().endswith(('.jpg', '.jpeg', '.png')):
            avm = AVM.from_header(header)
            avm.embed(output, output)
        else:
            warnings.warn(
                "AVM tags will not be embedded in RGB image, as only JPEG and PNG files are supported"
            )
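
A hedged usage sketch of the variant above (file names and stretches are placeholders): with a PNG output and make_nans_transparent=True, pixels that are NaN in any channel are given a transparent alpha layer, and AVM tags are embedded when PyAVM >= 0.9.1 is available:

from aplpy import make_rgb_image  # assumes an aplpy version providing make_nans_transparent

make_rgb_image(['halpha.fits', 'oiii.fits', 'sii.fits'],  # hypothetical aligned FITS images
               'nebula_rgb.png',
               stretch_r='sqrt', stretch_g='sqrt', stretch_b='sqrt',
               make_nans_transparent=True,
               embed_avm_tags=True)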
Example no. 8
def main(argv):

	config_file=None
	list_file=None

	try:
		opts, args = getopt.getopt(argv,"hc:l:p:f:",["config=","list=","prefix=","filters="])
	except getopt.GetoptError:
		print 'make_rgb_image.py -c <configuration file> -l <image file list> -p <prefix> -f <filters>'
		sys.exit(2)

	for opt, arg in opts:
		if opt == '-h':
			print 'make_rgb_image.py -c <configuration file> -l <image file list> -p <prefix> -f <filters>'
			sys.exit()
		elif opt in ("-c", "--config"):
			config_file = arg
		elif opt in ("-l", "--list"):
			list_file = arg
		elif opt in ("-p", "--prefix"):
			prefix = arg
		elif opt in ("-f", "--filters"):
			filters = string.split(arg,',')

	if config_file is None or list_file is None:
		raise ArgumentError("You need to supply a configuration file and an image file list")

	print 'Configuration file is ', config_file
	print 'Image file list is ', list_file

	config_data = ConfigParser.ConfigParser()
	config_data.read(config_file)

	prefix=config_data.get('Stack','prefix')
	filters=string.split(config_data.get('Stack','filters'),',')
	stack_name=config_data.get('Stack','stack_name')
	stack_version=config_data.get('Stack','stack_version')
	stack_tile_ref=config_data.get('Stack','stack_tile_ref')
	stack_filter_ref=config_data.get('Stack','stack_filter_ref')

	list_data = np.loadtxt(list_file, dtype={'names': ('tile', 'image', 'catalog'), 'formats': ('S10', 'S50', 'S50')})

	stack_tile=np.unique(list_data['tile'])
	image_file=list_data['image']
	catalog_file=list_data['catalog']

	stack_filter=filters
	print "List of tiles: ", stack_tile
	print "List of filters: ", stack_filter_ref



	im_rgb_hd_file= '../catalogs/ngfs_tile1_rgb_asinh_v2.jpg'
	im_rgb_sd_file= '../catalogs/ngfs_tile1_rgb_asinh_v2_sd.png'
	cat_ngfs_nuc=ascii.read('../catalogs/NGFS_FCC_cat_nucleated.dat', format='fixed_width') #, fill_values=('', 'NA'))
	cat_ngfs_non=ascii.read('../catalogs/NGFS_FCC_cat_non_nucleated.dat', format='fixed_width') #, fill_values=('', 'NA'))

	decam_scale=0.263*u.arcsec
	fornax_distance=20.0*u.Mpc
	dwarf_zoom_radius=Angle(30*u.arcsec)

	#cat_ngfs_nuc.add_row()

	cat_ngfs=cat_ngfs_nuc
	cat_ngfs.add_column( Column( np.full(len(cat_ngfs),np.nan), name='mu_i' ) )
	cat_ngfs['mu_i']=cat_ngfs['m_i']+2.5*np.log10(2*np.pi* cat_ngfs['reff_arcsec']**2 )

	gv=cat_ngfs['Reference'].mask.nonzero()  # Look for the masked values
	cat_ngfs['Reference'][gv]=''  # Replace the masked values with a null string

	n=np.int(np.ceil(np.sqrt(len(cat_ngfs))))
	fig = plt.figure(figsize=(14.,14.), dpi=300)
	gs=gridspec.GridSpec(n, n)
	gs.update(left=0.03, right=0.97, bottom=0.03, top=0.97, wspace=0.2, hspace=0.2)

	# Now reading the HD rgb image

	im_data = np.flipud(skimage.io.imread(im_rgb_hd_file))
	im_size= im_data.shape
	avm=AVM.from_image(im_rgb_hd_file)
	w = avm.to_wcs()
	w.naxis1=im_size[1]
	w.naxis2=im_size[0]

	# Sort them by magnitude
	cat_ngfs.sort('M_i')
	cat_ngfs_coo = SkyCoord(cat_ngfs['RA'], cat_ngfs['DEC'], unit="deg")

	for i in np.arange(len(cat_ngfs)):
		print 'Processing NGFS dwarf ', cat_ngfs['ID'][i]

		ax=plt.subplot(gs[i])
		ax.set_aspect('equal')
		ax.axis('off')
		
		im_crop_coo=w.wcs_world2pix([[ cat_ngfs_coo.ra[i].deg,(cat_ngfs_coo.dec[i]+dwarf_zoom_radius).deg],[cat_ngfs_coo.ra[i].deg,(cat_ngfs_coo.dec[i]-dwarf_zoom_radius).deg]], 1)
		im_crop_size=(np.abs(im_crop_coo[0,1]-im_crop_coo[1,1])*np.asarray([1.,1.])).astype(int)
		im_crop_coo=(w.wcs_world2pix([[ cat_ngfs_coo.ra[i].deg, cat_ngfs_coo.dec[i].deg]], 1)[0]).astype(int)
		im_crop_data=im_data[im_crop_coo[1]-im_crop_size[1]/2:im_crop_coo[1]+im_crop_size[1]/2,im_crop_coo[0]-im_crop_size[0]/2:im_crop_coo[0]+im_crop_size[0]/2]
		skimage.io.imsave('dwarf_zoom.png', np.flipud(im_crop_data))

		im_crop_size= im_crop_data.shape
		w_crop=w[:]
		w_crop.naxis1=im_crop_size[1]
		w_crop.naxis2=im_crop_size[0]
		w_crop.wcs.crpix[0] -= (im_crop_coo[0]-im_crop_size[0]/2)
		w_crop.wcs.crpix[1] -= (im_crop_coo[1]-im_crop_size[1]/2)
		
		f = aplpy.FITSFigure(w_crop, figure=fig, subplot=list(ax.get_position().bounds) )
		f.axis_labels.hide()
		f.tick_labels.hide()
		f.ticks.hide()
		f.show_rgb('dwarf_zoom.png')
	#	f.add_label( (cat_ngfs_coo.ra[i]+dwarf_zoom_radius*1.).deg, (cat_ngfs_coo.dec[i]-dwarf_zoom_radius*0.8).deg, cat_ngfs['ID'][i], size=7, color='silver', horizontalalignment='left', alpha=0.8, family='sans-serif', style='italic' )
	#	f.add_label( (cat_ngfs_coo.ra[i]+dwarf_zoom_radius*1.).deg, (cat_ngfs_coo.dec[i]-dwarf_zoom_radius*0.95).deg, cat_ngfs['Reference'][i], size=7, color='silver', horizontalalignment='left', alpha=0.8, family='sans-serif', style='italic' )

	fig.savefig('../figures/NGFS_dwarfs_nucleated_tile1_SORT_total_magnitude_nolabels.pdf', dpi=300)
	sys.exit()

	cat_ngfs.sort('mu_i')
	cat_ngfs_coo = SkyCoord(cat_ngfs['RA'], cat_ngfs['DEC'], unit="deg")

	for i in np.arange(len(cat_ngfs)):
		print 'Processing NGFS dwarf ', cat_ngfs['ID'][i]

		ax=plt.subplot(gs[i])
		ax.set_aspect('equal')
		ax.axis('off')
		
		im_crop_coo=w.wcs_world2pix([[ cat_ngfs_coo.ra[i].deg,(cat_ngfs_coo.dec[i]+dwarf_zoom_radius).deg],[cat_ngfs_coo.ra[i].deg,(cat_ngfs_coo.dec[i]-dwarf_zoom_radius).deg]], 1)
		im_crop_size=(np.abs(im_crop_coo[0,1]-im_crop_coo[1,1])*np.asarray([1.,1.])).astype(int)
		im_crop_coo=(w.wcs_world2pix([[ cat_ngfs_coo.ra[i].deg, cat_ngfs_coo.dec[i].deg]], 1)[0]).astype(int)
		im_crop_data=im_data[im_crop_coo[1]-im_crop_size[1]/2:im_crop_coo[1]+im_crop_size[1]/2,im_crop_coo[0]-im_crop_size[0]/2:im_crop_coo[0]+im_crop_size[0]/2]
		skimage.io.imsave('dwarf_zoom.png', np.flipud(im_crop_data))

		im_crop_size= im_crop_data.shape
		w_crop=w[:]
		w_crop.naxis1=im_crop_size[1]
		w_crop.naxis2=im_crop_size[0]
		w_crop.wcs.crpix[0] -= (im_crop_coo[0]-im_crop_size[0]/2)
		w_crop.wcs.crpix[1] -= (im_crop_coo[1]-im_crop_size[1]/2)
		
		f = aplpy.FITSFigure(w_crop, figure=fig, subplot=list(ax.get_position().bounds) )
		f.axis_labels.hide()
		f.tick_labels.hide()
		f.ticks.hide()
		f.show_rgb('dwarf_zoom.png')
		f.add_label( (cat_ngfs_coo.ra[i]+dwarf_zoom_radius*1.).deg, (cat_ngfs_coo.dec[i]-dwarf_zoom_radius*0.8).deg, cat_ngfs['ID'][i], size=7, color='silver', horizontalalignment='left', alpha=0.8, family='sans-serif', style='italic' )
		f.add_label( (cat_ngfs_coo.ra[i]+dwarf_zoom_radius*1.).deg, (cat_ngfs_coo.dec[i]-dwarf_zoom_radius*0.95).deg, cat_ngfs['Reference'][i], size=7, color='silver', horizontalalignment='left', alpha=0.8, family='sans-serif', style='italic' )

	fig.savefig('../figures/NGFS_dwarfs_nucleated_tile1_SORT_surface_brightness.pdf', dpi=300)
Example no. 9
def overlay(
        image,
        bdf,  # basic inputs
        l1=0.0,
        l2=1.0,  # levels
        shift=[0.0, 0.0],  # target shift from reference value 
        radec=[0.0, 0.0],  # target coords, if not ref value (d/s)
        nod=[0, 0],  # shift from default ctr (in asec)
        showGrid=False,  # show coordinate grid?
        stretch='linear',  # 'log', 'linear', etc.     
        invert=False,  # invert a grayscale image?
        hdu=0,  # HDU, if not 0. 
        gray=True,
        readAVM=False):  # plot colour, AVM read
    """ 
    Overlay a SAMI bundle onto a fits image 

    Adapted to astropy input. 

    Inputs 
    -------
      image: fits image used for overlay; 
      bdf:   a 'bundle definition file', generate with the 'bundle_definition' 
             function in this module. 
    """

    import aplpy
    import astropy.wcs as pywcs
    from astropy.table import Table  # assumed: used below to read the bundle definition file

    # is the input image a fits file?
    isfits = image.endswith(('.fits', '.fit'))

    # Use APLPy to read in the FITS file.
    fig = aplpy.FITSFigure(image, north=True, hdu=hdu)
    if (gray == True):
        fig.show_grayscale(vmin=l1, vmax=l2, stretch=stretch, invert=invert)
    else:
        fig.show_rgb()
    if showGrid == True: fig.show_grid()

    # Read the AVM of a jpg or tiff image:
    if readAVM == True:
        from pyavm import AVM
        avm = AVM(image)

    # read BDF
    tab = Table.read(bdf)  # the original referenced 'tab' before assignment; astropy's Table.read is assumed here

    # Get field centre coordinates -- quite messy, clean up.
    ctr = [0., 0.]  # just initiate the field centre (list ok)

    # Input type: image loaded with Astro Visualisation Metadata --
    if (np.mean(radec) == 0.0) and (readAVM != True) and (isfits != True):
        ctr = [0.0, 0.0]  # if cannot find AVM, ctr=0,0
        print("Warning: did not find a valid field centre definition")
    if (np.mean(radec) != 0.0) and (isfits != True):  # respec' user input
        radec = np.array(radec)
        if radec.size > 2:  # if input in sex, not dec
            from SAMI_sdss import ten
            ctr[0] = ten(radec[0], radec[1], radec[2], RA=True)
            ctr[1] = ten(radec[3], radec[4], radec[5])
        else:
            ctr = radec

    if readAVM == True and (np.mean(radec) == 0.0):
        ctr = avm.Spatial.ReferenceValue  # read AVM field centre

    # Input type: fits file --
    if isfits:
        data = pf.open(image)
        wcs = pywcs.WCS(data[0].header)
        ctr = wcs.wcs.crval

    # apply 'nod' (fine positioning shift)
    if (nod[0] != 0) and (nod[1] != 0):
        nod[0] = nod[0] * 15
        nod = np.array(nod) / 3600.
        ctr = np.array(ctr) - nod
        from SAMI_sdss import sixty
        stringer1 = 'Recentering to: ' + str(ctr[0]) + ' ' + str(ctr[1])
        stringer2 = '            ie: ' + str(sixty(ctr[0],RA=True)) + \
            ' ' + str(sixty(ctr[1]))
        print('')
        print(stringer1)
        print(stringer2)

    # shift SAMI bundle into place
    ra = tab['RA'] / np.cos(np.radians(ctr[1])) + ctr[0]
    dec = tab['DEC'] + ctr[1]

    # SAMI bundle (individual fibres)
    fig.show_circles(ra,
                     dec,
                     0.8 / 3600.,
                     edgecolor='cyan',
                     facecolor='cyan',
                     alpha=0.5)

    # Exclusion radius
    fig.show_circles(ctr[0],
                     ctr[1],
                     165. / 3600.,
                     edgecolor='green',
                     facecolor='none',
                     linewidth=3.)
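
A small sketch of the readAVM branch above, with a hypothetical JPEG: PyAVM exposes the AVM tags as attributes, so the field centre written by the imaging pipeline can be read back directly:

from pyavm import AVM

avm = AVM.from_image('sami_field.jpg')   # hypothetical file; older PyAVM accepted AVM('sami_field.jpg')
ra_ctr, dec_ctr = avm.Spatial.ReferenceValue
print(ra_ctr, dec_ctr)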
Example no. 10
def go(
    fits_paths = None,
    rgb_path = None,
    output_path = None,
    tile_path = None,
    work_dir = '',
    anet_bin_prefix = '',
):
    """
    Do the whole thing.
    """
    cfg = AlignmentConfig()
    index_fits_list = []
    scale_low = scale_high = None

    for fits_num, fits_path in enumerate(fits_paths):
        logger.info('Processing reference science image `%s` ...', fits_path)

        try:
            info = source_extract_fits(fits_path, log_prefix='  ')
        except Exception as e:
            logger.warning('  Failed to extract sources from this file')
            logger.warning('  Caused by: %s', e)
            continue

        objects_fits = os.path.join(work_dir, f'objects{fits_num}.fits')
        info.wcs_objects.write(objects_fits, format='fits', overwrite=True)

        # Generate the Astrometry.Net index

        index_fits = os.path.join(work_dir, f'index{fits_num}.fits')
        index_log = os.path.join(work_dir, f'build-index-{fits_num}.log')

        try:
            index_extracted_image(
                objects_fits,
                index_fits,
                index_log = index_log,
                extraction_info = info,
                index_unique_key = str(fits_num),
                anet_bin_prefix = anet_bin_prefix,
                log_prefix = '  ',
            )
        except Exception as e:
            logger.warning('  Failed to index this file')
            logger.warning('  Caused by: %s', e)
            continue

        # Success!

        index_fits_list.append(index_fits)

        this_scale_low = info.width_deg * 60 / cfg.scale_range_factor  # units are image width in arcmin
        this_scale_high = info.width_deg * 60 * cfg.scale_range_factor

        if scale_low is None:
            scale_low = this_scale_low
            scale_high = this_scale_high
        else:
            scale_low = min(scale_low, this_scale_low)
            scale_high = max(scale_high, this_scale_high)

    if not index_fits_list:
        raise Exception('cannot align: failed to index any of the input FITS files')

    # Write out config file

    cfg_path = os.path.join(work_dir, 'aligner.cfg')

    with open(cfg_path, 'wt') as f:
        print('add_path', work_dir, file=f)
        print('inparallel', file=f)

        for p in index_fits_list:
            print('index', p, file=f)

    # Solve our input image

    wcs_file = os.path.join(work_dir, 'solved.fits')

    # https://manpages.debian.org/testing/astrometry.net/solve-field.1.en.html
    argv = [
        anet_bin_prefix + 'solve-field',
        '-v',
        '--config', cfg_path,
        '--scale-units', 'arcminwidth',
        '--scale-low', str(scale_low),
        '--scale-high', str(scale_high),
        '--cpulimit', str(cfg.solve_time_limit_seconds),
        '--objs', str(cfg.object_limit),
        '--dir', work_dir,
        '-N', wcs_file,
        '--no-plots',
        '--no-tweak',
        '--downsample', str(cfg.downsample_factor),
        rgb_path,
    ]
    logger.debug('solve command: %s', ' '.join(argv))

    solve_log = os.path.join(work_dir, 'solve-field.log')
    logger.info('Launching Astrometry.Net solver for `%s` ...', rgb_path)

    try:
        with open(solve_log, 'wb') as log:
            subprocess.check_call(
                argv,
                stdout = log,
                stderr = subprocess.STDOUT,
                shell = False,
            )

        assert os.path.exists(wcs_file), 'Astrometry.Net did not emit a solution file'
    except Exception as e:
        logger.error('  Failed to solve this image')
        logger.error('  Proximate Python exception: %s', e)
        logger.error('  Output from solve-field:')

        try:
            with open(solve_log, 'r') as f:
                for line in f:
                    logger.error('    %s', line.rstrip())
        except Exception as sub_e:
            logger.error('     [failed to read the log! error: %s]', sub_e)

        raise

    # Convert solution to AVM, with hardcoded parity inversion.
    #
    # TODO: map positive parity into both AVM and WWT metadata correctly.

    img = ImageLoader().load_path(rgb_path)

    with fits.open(wcs_file) as hdul:
        header = hdul[0].header
        wcs = WCS(header)

    hdwork = wcs.to_header()
    hdwork['CRPIX2'] = img.height + 1 - hdwork['CRPIX2']
    hdwork['PC1_2'] *= -1
    hdwork['PC2_2'] *= -1
    wcs = WCS(hdwork)
    avm = AVM.from_wcs(wcs, shape=(img.height, img.width))

    # Apply AVM
    #
    # pyavm can't convert image formats, so if we've been asked to emit a tagged
    # image in a format different from the input image's format, we need to do
    # that conversion manually. We're not in a great position to be clever, so we
    # infer the "format" from the filename extensions.

    in_name_pieces = os.path.splitext(os.path.basename(rgb_path))

    if output_path is None:
        output_path = in_name_pieces[0] + '_tagged' + in_name_pieces[1]

    input_ext = in_name_pieces[1].lower()
    output_ext = os.path.splitext(output_path)[1].lower()

    if input_ext != output_ext:
        logger.info('Converting input image to create `%s`', output_path)
        img.save(output_path, format=output_ext.replace('.', ''))

        logger.info('Adding AVM tags to `%s`', output_path)
        avm.embed(output_path, output_path)
    else:
        logger.info('Writing AVM-tagged image to `%s`', output_path)
        avm.embed(rgb_path, output_path)

    # Tile it for WWT, if requested

    if tile_path is not None:
        from toasty.merge import averaging_merger, cascade_images
        from toasty.pyramid import PyramidIO

        logger.info('Creating base layer of WWT tiling ...')

        pio = PyramidIO(tile_path, default_format=img.default_format)
        builder = Builder(pio)
        builder.make_thumbnail_from_other(img)
        builder.tile_base_as_study(img, cli_progress=True)
        builder.apply_wcs_info(wcs, img.width, img.height)
        builder.set_name(in_name_pieces[0])
        builder.write_index_rel_wtml()

        logger.info('Cascading tiles ...')
        cascade_images(
            pio,
            builder.imgset.tile_levels,
            averaging_merger,
            cli_progress=True
        )
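
To round off, a minimal sketch (hypothetical file names and image size) of the AVM round trip the pipeline above relies on: build AVM tags from an astrometric solution, embed them into the RGB image, and read the WCS back out of the tagged file:

from astropy.io import fits
from astropy.wcs import WCS
from pyavm import AVM

wcs = WCS(fits.getheader('solved.fits'))       # hypothetical solve-field output
avm = AVM.from_wcs(wcs, shape=(2000, 3000))    # (height, width) of the RGB image, as above
avm.embed('mosaic_rgb.png', 'mosaic_rgb_tagged.png')

wcs_back = AVM.from_image('mosaic_rgb_tagged.png').to_wcs()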