Example #1
def get_psf_variation(file,
                      index,
                      radius,
                      out_file=None,
                      normalize=None,
                      debug=False):
    if file is None:
        raise RuntimeError("No file was provided!")

    if isinstance(index, list):
        if len(index) == 1:
            if index[0] == 0:
                logger.info(
                    "Estimating the image intensity peak to use as the aperture index")
                image = fits.getdata(file)
                if image.ndim == 3:
                    image = np.sum(image, axis=0)
                index = np.unravel_index(np.argmax(image), image.shape)
                logger.info(f"Index is set to {index}")
            else:
                index = (index[0], index[0])
        index = tuple(index)

    if out_file is None:
        out_file = "var_" + os.path.basename(file).replace(".fits", ".dat")

    # Initialize the aperture
    aperture = Aperture(index, radius, data=file, crop=True)
    if debug:
        imshow(aperture.get_integrated(), maximize=False)

    # Extract PSF profile
    logger.info(f"Extracting PSF profile from file {file}")
    xdata, ydata, edata = aperture.get_psf_variance()

    # Normalize profile
    if normalize == 'peak':
        # Scale the uncertainties by the same factor as the data
        edata /= ydata[0]
        ydata /= ydata[0]
    elif normalize == 'aperture':
        edata /= ydata[-1]
        ydata /= ydata[-1]
    elif normalize is not None:
        raise ValueError(
            "Normalize must be either 'peak', 'aperture', or None!")

    # Save encircled energy data to outfile
    out_table = Table(data=[xdata, ydata, edata],
                      names=['Radius', 'Variance', 'dVariance'])
    logger.info(f"Store PSF profile to {out_file}")
    out_table.write(out_file, overwrite=True, format='ascii.fixed_width')
Example #2
    def extract(self, mode='align_median', file_shifts=None, inspect_aperture=False):
        if 'median' in mode:
            combine = np.median
        elif 'mean' in mode:
            combine = np.mean
        else:
            raise ValueError('PSFExtraction received unknown mode for extract method ({}).'.format(mode))

        self.params.psfFiles = []
        for file_index, file in enumerate(self.params.inFiles):
            # Initialize file by file
            logging.info("Extracting PSFs from file {}".format(file))
            psf_file = PSFFile(file, out_dir=self.params.tmpDir, frame_shape=(self.box_size, self.box_size))
            self.params.psfFiles.append(psf_file.filename)
            if file_shifts is None:
                file_shift = (0, 0)
            else:
                file_shift = file_shifts[file_index]
            self.init_ref_apertures(file, shift=file_shift)
            frame_number = fits.getheader(file)['NAXIS3']

            # Check apertures visually
            if inspect_aperture:
                for index, aperture in enumerate(self.ref_apertures):
                    imshow(aperture.get_integrated(), title="Inspect reference aperture {}".format(index + 1))

            # Extract the PSF by combining the aperture frames in the desired mode
            for frame_index in range(frame_number):
                print("\r\tExtracting PSF from frame {}/{}".format(frame_index + 1, frame_number), end='')
                psf = np.empty((len(self.ref_apertures), self.box_size, self.box_size))
                for aperture_index, aperture in enumerate(self.ref_apertures):
                    # Copy aperture into psf
                    if 'align' in mode:
                        psf[aperture_index] = shift(aperture[frame_index], shift=(aperture.xoffset, aperture.yoffset))
                    elif 'resample' in mode:
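                        # NOTE: resampling is not implemented here; this branch
                        # leaves psf[aperture_index] uninitialized (np.empty above)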
                        pass
                    else:
                        psf[aperture_index] = aperture[frame_index]
                    # Normalization of each psf to make median estimate sensible
                    psf[aperture_index] /= np.sum(psf[aperture_index])

                psf = combine(psf, axis=0)
                psf_file.update_frame(frame_index, psf)
            print('\r')
Example #3
 def test_pad_array(self):
     pad_vectors, ref_pad_vector = alignment.get_pad_vectors(
         self.shifts,
         cube_mode=False,
         return_reference_image_pad_vector=True)
     padded = alignment.pad_array(np.ones(self.image_shape),
                                  pad_vectors[1],
                                  mode='same',
                                  reference_image_pad_vector=ref_pad_vector)
     imshow(padded)
     pad_vectors, ref_pad_vector = alignment.get_pad_vectors(
         self.shifts,
         cube_mode=True,
         return_reference_image_pad_vector=True)
     for pad_vector in pad_vectors:
         padded = alignment.pad_array(
             np.ones(self.cube_shape),
             pad_vector=pad_vector,
             mode='same',
             reference_image_pad_vector=ref_pad_vector)
Example #4
def main(options=None):

    args = parser(options=options)

    # Interpret input
    args.index = tuple(args.index)

    if args.file is None:
        raise RuntimeError("No file was provided!")

    if args.outfile is None:
        outfile = os.path.basename(args.file)
        outfile = "psf_" + outfile.replace(".fits", ".dat")
        # outfile = os.path.join(args.outdir, outfile)
    else:
        outfile = args.outfile

    # Initialize the aperture
    aperture = Aperture(args.index, args.radius, data=args.file, crop=True)
    # peak = aperture.get_aperture_peak()
    # aperture = Aperture(peak, args.radius, data=args.file, crop=True)
    if args.debug:
        imshow(aperture.get_integrated(), maximize=args.maximize)

    xdata, ydata = aperture.get_psf_profile()

    if args.normalize == 'peak':
        ydata /= ydata[0]
    elif args.normalize == 'aperture':
        ydata /= ydata[-1]
    elif args.normalize is not None:
        raise ValueError(
            "Normalize must be either 'peak', 'aperture', or None!")

    # Save encircled energy data to outfile
    header = "Radius Flux"
    data = np.concatenate(([xdata], [ydata]), axis=0).transpose()
    np.savetxt(outfile, data, header=header)

    if args.debug:
        psf_profile_plot(outfile, maximize=args.maximize)
Example #5
    def test_init(self):
        telescope_static_psf = Telescope(
            8.0 * u.m,
            central_obscuration=0.14,
            name="VLT Unit Telescope",
            psf_source=self.scao_long_exposure_psf_file)
        assert np.abs(np.sum(telescope_static_psf.psf) - 1.0) < 1e-6

        telescope_gaussian_psf = Telescope(8.2 * u.m,
                                           psf_source='Gaussian',
                                           radius=0.1645 * u.arcsec,
                                           psf_resolution=0.0106 * u.arcsec)
        assert np.abs(np.sum(telescope_gaussian_psf.psf) - 1.0) < 1e-6
        if self.visual > 0:
            imshow(telescope_gaussian_psf.psf, title="Test 'Gaussian' model")

        telescope_airydisk_psf = Telescope(8.2 * u.m,
                                           psf_source='AiryDisk',
                                           radius=0.4777 * u.arcsec,
                                           psf_resolution=0.0106 * u.arcsec)
        if self.visual > 0:
            imshow(telescope_airydisk_psf.psf, title="Test 'AiryDisk' model")
Example #6
 def test_plot_powerspec1d(self):
     psf_image = psf(self.aperture)
     plots.imshow(psf_image, title="PSF", norm=LogNorm())
     plots.plot_powerspec1d(psf_image)
Example #7
File: ssa.py Project: deepin00/specklepy
def ssa(files,
        mode='same',
        reference_file=None,
        outfile=None,
        in_dir=None,
        tmp_dir=None,
        lazy_mode=True,
        box_indexes=None,
        debug=False,
        **kwargs):
    """Compute the SSA reconstruction of a list of files.

    The simple shift-and-add (SSA) algorithm makes use of the structure of typical speckle patterns, i.e.
    short-exposure point-spread functions (PSFs). These show multiple peaks resembling the diffraction-limited PSF of
    coherent fractions within the telescope aperture. Under good conditions or on small telescopes, there is typically
    one dominant coherent atmospheric cell, so speckle PSFs tend to show one major intensity peak. The algorithm
    exploits this fact: it identifies the emission peak in a given observation frame, assumes that this peak always
    belongs to the same star, and aligns all frames on the coordinates of the emission peak.

    See Bates & Cady (1980) for details.

    Args:
        files (list or array_like):
            List of complete paths to the fits files that shall be considered for the SSA reconstruction.
        mode (str):
            Name of the reconstruction mode: In 'same' mode, the reconstruction covers the same field of view as the
            reference file. In 'full' mode, every patch of the sky that is covered by at least one frame will be
            contained in the final reconstruction.
        reference_file (str, int, optional):
            Path to a reference file or index of the file in files, relative to which the shifts are computed. See
            specklepy.core.alignment.get_shifts for details. Default is 0.
        outfile (specklepy.io.recfile, optional):
            Object to write the result to, if provided.
        in_dir (str, optional):
            Path to the files. `None` is substituted by an empty string.
        tmp_dir (str, optional):
            Path of a directory in which the temporary results are stored.
        lazy_mode (bool, optional):
            Set to False, to enforce the alignment of a single file with respect to the reference file. Default is True.
        box_indexes (list, optional):
            Constrain the search for the intensity peak to the specified box. The full frames are searched if this is
            not provided.
        debug (bool, optional):
            Show debugging information. Default is False.

    Returns:
        reconstruction (np.ndarray):
            The image reconstruction. The size depends on the mode argument.
    """

    logger.info("Starting SSA reconstruction...")
    # Check parameters
    if not isinstance(files, (list, np.ndarray)):
        if isinstance(files, str):
            files = [files]
        else:
            raise SpecklepyTypeError('ssa()',
                                     argname='files',
                                     argtype=type(files),
                                     expected='list')

    if isinstance(mode, str):
        if mode not in ['same', 'full', 'valid']:
            raise SpecklepyValueError('ssa()',
                                      argname='mode',
                                      argvalue=mode,
                                      expected="'same', 'full' or 'valid'")
    else:
        raise SpecklepyTypeError('ssa()',
                                 argname='mode',
                                 argtype=type(mode),
                                 expected='str')

    if reference_file is None:
        reference_file = files[0]
    elif isinstance(reference_file, int):
        reference_file = files[reference_file]
    elif not isinstance(reference_file, str):
        raise SpecklepyTypeError('ssa()',
                                 argname='reference_file',
                                 argtype=type(reference_file),
                                 expected='str or int')

    if outfile is None:
        pass
    elif isinstance(outfile, str):
        outfile = ReconstructionFile(files=files,
                                     filename=outfile,
                                     cards={"RECONSTRUCTION": "SSA"})
    elif isinstance(outfile, ReconstructionFile):
        pass
    else:
        raise SpecklepyTypeError('ssa()',
                                 argname='outfile',
                                 argtype=type(outfile),
                                 expected='str')

    if in_dir is None:
        in_dir = ''
    reference_file = os.path.join(in_dir, reference_file)

    if tmp_dir is not None:
        if isinstance(tmp_dir, str) and not os.path.isdir(tmp_dir):
            os.makedirs(tmp_dir)

    if not isinstance(lazy_mode, bool):
        raise SpecklepyTypeError('ssa()',
                                 argname='lazy_mode',
                                 argtype=type(lazy_mode),
                                 expected='bool')

    if box_indexes is not None:
        box = Box(box_indexes)
    else:
        box = None

    var_ext = kwargs.get('variance_extension_name', 'VAR')

    if debug:
        logger.setLevel('DEBUG')
        logger.handlers[0].setLevel('DEBUG')
        logger.info("Set logging level to DEBUG")

    # Align reconstructions if multiple files are provided
    if lazy_mode and len(files) == 1:

        # Do not align just a single file
        with fits.open(os.path.join(in_dir, files[0])) as hdu_list:
            cube = hdu_list[0].data
            if var_ext in hdu_list:
                var_cube = hdu_list[var_ext].data
            else:
                var_cube = None
            reconstruction, reconstruction_var = coadd_frames(
                cube, var_cube=var_cube, box=box)

    else:

        # Compute temporary reconstructions of the individual cubes
        tmp_files = []
        for index, file in enumerate(files):
            with fits.open(os.path.join(in_dir, file)) as hdu_list:
                cube = hdu_list[0].data
                if var_ext in hdu_list:
                    var_cube = hdu_list[var_ext].data
                    logger.debug(
                        f"Found variance extension {var_ext} in file {file}")
                else:
                    logger.debug(
                        f"Did not find variance extension {var_ext} in file {file}"
                    )
                    var_cube = None
                tmp, tmp_var = coadd_frames(cube, var_cube=var_cube, box=box)

            if debug:
                imshow(box(tmp) if box is not None else tmp, norm='log')

            tmp_file = os.path.basename(file).replace(".fits", "_ssa.fits")
            tmp_file = os.path.join(tmp_dir, tmp_file)
            logger.info(
                "Saving interim SSA reconstruction of cube to {}".format(
                    tmp_file))
            tmp_file_object = Outfile(tmp_file, data=tmp, verbose=True)

            # Store variance of temporary reconstruction
            if tmp_var is not None:
                tmp_file_object.new_extension(var_ext, data=tmp_var)
                del tmp_var
            tmp_files.append(tmp_file)

        # Align tmp reconstructions and add up
        file_shifts, image_shape = alignment.get_shifts(
            tmp_files,
            reference_file=reference_file,
            return_image_shape=True,
            lazy_mode=True)
        pad_vectors, ref_pad_vector = alignment.get_pad_vectors(
            file_shifts,
            cube_mode=(len(image_shape) == 3),
            return_reference_image_pad_vector=True)

        # Iterate over file-wise reconstructions
        reconstruction = None
        reconstruction_var = None
        for index, file in enumerate(tmp_files):

            # Read data
            with fits.open(file) as hdu_list:
                tmp_image = hdu_list[0].data
                if var_ext in hdu_list:
                    tmp_image_var = hdu_list[var_ext].data
                else:
                    tmp_image_var = None

            # Initialize or co-add reconstructions and var images
            if reconstruction is None:
                reconstruction = alignment.pad_array(
                    tmp_image,
                    pad_vectors[index],
                    mode=mode,
                    reference_image_pad_vector=ref_pad_vector)
                if tmp_image_var is not None:
                    reconstruction_var = alignment.pad_array(
                        tmp_image_var,
                        pad_vectors[index],
                        mode=mode,
                        reference_image_pad_vector=ref_pad_vector)
            else:
                reconstruction += alignment.pad_array(
                    tmp_image,
                    pad_vectors[index],
                    mode=mode,
                    reference_image_pad_vector=ref_pad_vector)
                if tmp_image_var is not None:
                    reconstruction_var += alignment.pad_array(
                        tmp_image_var,
                        pad_vectors[index],
                        mode=mode,
                        reference_image_pad_vector=ref_pad_vector)
    logger.info("Reconstruction finished...")

    # Save the result to an Outfile
    if outfile is not None:
        outfile.data = reconstruction
        if reconstruction_var is not None:
            outfile.new_extension(name=var_ext, data=reconstruction_var)

    # Return reconstruction (and the variance map if computed)
    if reconstruction_var is not None:
        return reconstruction, reconstruction_var
    return reconstruction
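
A minimal sketch of the shift-and-add core described in the docstring above, assuming a plain (frames, y, x) numpy cube and ignoring variance propagation, boxes, and file handling (simple_ssa is a hypothetical helper, not part of specklepy):

import numpy as np

def simple_ssa(cube):
    """Align every frame on its intensity peak and co-add the frames."""
    ny, nx = cube.shape[1:]
    result = np.zeros((ny, nx))
    center = (ny // 2, nx // 2)
    for frame in cube:
        # Identify the emission peak of this frame
        peak = np.unravel_index(np.argmax(frame), frame.shape)
        # Move the peak to the image center and co-add
        result += np.roll(frame, (center[0] - peak[0], center[1] - peak[1]),
                          axis=(0, 1))
    return result

Note that np.roll wraps pixels around the edges; the pad_array machinery used above avoids this by padding the frames instead.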
Example #8
 def test_call(self):
     apodize(self.object, 'Gaussian', radius=16)
     apodize(self.object, 'Airy', radius=16)
     imshow(np.abs(apodize(self.object, 'Gaussian', radius=20)))
Example #9
 def test_weighted_combine(self):
     mean, weights = weighted_mean(self.data, axis=0)
     mean, weights = weighted_mean(self.data, axis=0, vars=self.vars)
     imshow(mean)
     imshow(weights)
Example #10
    def extract_epsfs(self, file_shifts=None, oversampling=4, debug=False):
        """Extract effective PSFs following Anderson & King (2000).

        Args:
            file_shifts (list, optional):
                List of frame shifts for each of the files with respect to the reference file. These will be used to
                adapt the reference star positions. Default is None.
            oversampling (int, optional):
                Factor of oversampling the input pixel grid. Default is 4.
            debug (bool, optional):
                Shows the (integrated) apertures if set to True. Default is False.

        Returns:
            psf_files (list):
                List of file names where the PSFs are stored.
        """
        # Create a list of psf files and store it to params
        psf_files = []

        # Iterate over input files
        for file_index, file in enumerate(self.in_files):
            # Initialize file by file
            logger.info("Extracting PSFs from file {}".format(file))
            psf_file = PSFFile(file,
                               out_dir=self.save_dir,
                               frame_shape=(self.box_size, self.box_size),
                               in_dir=self.in_dir,
                               header_card_prefix="HIERARCH SPECKLEPY")
            psf_files.append(psf_file.filename)

            # Consider alignment of cubes when initializing the apertures, i.e.
            # the position of the aperture in the shifted cube
            if file_shifts is None:
                apertures = self.init_apertures(file)
            else:
                apertures = self.init_apertures(file,
                                                shift=file_shifts[file_index])

            # Extract the number of frames in the FITS file from the header
            frame_number = fits.getheader(os.path.join(self.in_dir,
                                                       file))['NAXIS3']

            # Extract the PSF by combining the aperture frames in the desired mode
            for frame_index in trange(frame_number,
                                      desc="Extracting PSF from frames"):

                if debug:
                    if frame_index > 0:
                        break

                # Initialize oversampled grids
                epsf_oversampled = np.zeros((self.box_size * oversampling,
                                             self.box_size * oversampling))
                ivar_oversampled = np.zeros((self.box_size * oversampling,
                                             self.box_size * oversampling))

                for aperture_index, aperture in enumerate(apertures):
                    xoff = np.floor(
                        aperture.xoffset *
                        oversampling).astype(int) + oversampling // 2
                    yoff = np.floor(
                        aperture.yoffset *
                        oversampling).astype(int) + oversampling // 2

                    # Getting coordinates of aperture and stretching to oversampled image
                    y, x = np.mgrid[0:self.box_size, 0:self.box_size]
                    x *= oversampling
                    y *= oversampling
                    x += xoff
                    y += yoff

                    epsf_oversampled[y, x] += aperture.data[frame_index]
                    ivar_oversampled[y, x] += np.divide(1, aperture.vars)

                if debug:
                    imshow(apertures[0].data[frame_index],
                           maximize=True,
                           title="Aperture 0")
                    imshow(epsf_oversampled,
                           maximize=True,
                           title="oversampled ePSF")
                    imshow(ivar_oversampled,
                           maximize=True,
                           title='oversampled IVAR')

                # Sample down to the initial grid
                epsf = np.zeros((self.box_size, self.box_size))
                for indizes in np.ndindex(*epsf.shape):
                    weighted_sum = np.multiply(epsf_oversampled,
                                               ivar_oversampled)
                    weighted_sum = np.sum(
                        weighted_sum[indizes[0] *
                                     oversampling:(indizes[0] + 1) *
                                     oversampling, indizes[1] *
                                     oversampling:(indizes[1] + 1) *
                                     oversampling])
                    weights_sum = np.sum(
                        ivar_oversampled[indizes[0] *
                                         oversampling:(indizes[0] + 1) *
                                         oversampling, indizes[1] *
                                         oversampling:(indizes[1] + 1) *
                                         oversampling])
                    epsf[indizes] = np.divide(weighted_sum, weights_sum)
                if debug:
                    imshow(epsf, title='ePSF', maximize=True)

                psf_file.update_frame(frame_index, epsf)

        return psf_files
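
The down-sampling loop above recomputes the full product of epsf_oversampled and ivar_oversampled for every output pixel. A vectorized sketch of the same inverse-variance weighted block average, assuming a square grid whose side is a multiple of the oversampling factor (downsample_weighted is a hypothetical helper, not part of specklepy):

import numpy as np

def downsample_weighted(epsf_oversampled, ivar_oversampled, oversampling):
    n = epsf_oversampled.shape[0] // oversampling
    # Reshape into (n, oversampling, n, oversampling) blocks and reduce
    weighted = (epsf_oversampled * ivar_oversampled).reshape(
        n, oversampling, n, oversampling)
    weights = ivar_oversampled.reshape(n, oversampling, n, oversampling)
    return weighted.sum(axis=(1, 3)) / weights.sum(axis=(1, 3))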
Example #11
 def test_init_from_keyword(self):
     z = Zernike()
     imshow(z.defocus(-1, 256), title='defocus')
Example #12
def main(options=None):

    args = parser(options=options)

    # Interpret input
    args.index = tuple(args.index)

    if args.file is None:
        if args.Fourier_file is not None:
            start_with_Fourier_file = True
            logger.info("Starting from Fourier file {}.".format(
                args.Fourier_file))
            args.file = args.Fourier_file
        else:
            raise IOError(
                "At least one of --file or --Fourier_file has to be given!")
    else:
        start_with_Fourier_file = False
        # Apply default
        if args.Fourier_file is None:
            args.Fourier_file = args.file.replace(".fits", "_Fourier.fits")
    # Apply default
    if args.outfile is None:
        args.outfile = args.file.replace(".fits", ".dat")

    # Starting with the pre-analysis of the file
    if not start_with_Fourier_file:

        # Test of the heavy spot of the aperture
        aperture = Aperture(*args.index,
                            args.radius,
                            data=args.file,
                            crop=False)
        peak = aperture.get_aperture_peak()
        if args.debug:
            imshow(aperture.data, title="Aperture in the integrated cube")
        if peak == args.index:
            logger.info(
                "Index {} is identical to the maximum of the integrated cube image {}."
                .format(args.index, peak))
        else:
            logger.info(
                "Index {} is not identical to the maximum of the integrated cube image {}."
                .format(args.index, peak))
            answer = input(
                "Shall the index guess be replaced by the coordinates of the local maximum? (yes/no)"
            )
            if answer.lower() == "yes":
                logger.info(
                    "Replacing index {} by the maximum of the integrated cube image {}..."
                    .format(args.index, peak))
                args.index = peak
                aperture = Aperture(*args.index,
                                    args.radius,
                                    data=args.file,
                                    mask=None,
                                    crop=True)
                if args.debug:
                    imshow(aperture.data, title="Updated aperture")
            else:
                logger.info("Continuing with the central index {}...".format(
                    args.index))

        # Remove margins from aperture
        aperture.remove_margins()
        logger.info("The aperture has shape {}.".format(aperture.data.shape))
        aperture.powerspec_to_file(args.file, args.Fourier_file)
        del aperture

    # Show interim results
    Fourier_cube = fits.getdata(args.Fourier_file)
    Fourier_mean = np.mean(Fourier_cube, axis=0)
    Fourier_var = np.var(
        Fourier_cube,
        axis=0)  # Compute variance to average linearly in the following
    if args.debug:
        imshow(
            Fourier_mean,
            title="Mean of the Fourier transformed cube along the time axis",
            norm=LogNorm())
        imshow(
            np.sqrt(Fourier_var),
            title=
            "Standard deviation in the Fourier transformed cube along the time axis",
            norm=LogNorm())

    # Compute Fourier radius map and mask
    center = (Fourier_mean.shape[0] / 2, Fourier_mean.shape[1] / 2)
    xx, yy = np.mgrid[:Fourier_mean.shape[0], :Fourier_mean.shape[1]]
    Fourier_radius = np.sqrt(
        np.square(xx - center[0]) + np.square(yy - center[1]))
    Fourier_radius = np.ma.masked_greater(Fourier_radius, center[0])
    Fourier_mean = np.ma.masked_array(Fourier_mean, mask=Fourier_radius.mask)
    Fourier_var = np.ma.masked_array(Fourier_var, mask=Fourier_radius.mask)

    # Average azimuthally
    logger.info("Averaging the Fourier plane azimuthally")
    Fourier_radius = Fourier_radius.reshape((-1))
    Fourier_mean = Fourier_mean.reshape((-1))
    Fourier_var = Fourier_var.reshape((-1))

    xdata = np.unique(Fourier_radius)
    ydata = np.zeros(xdata.shape)
    edata = np.zeros(xdata.shape)
    for index, value in enumerate(xdata):
        ydata[index] = np.mean(Fourier_mean[np.where(Fourier_radius == value)])
        edata[index] = np.mean(Fourier_var[np.where(Fourier_radius == value)])
    # Turn variance into standard deviation
    edata = np.sqrt(edata)

    if args.pixel_scale is not None:
        logger.warning("Handling the pixel scale is not implemented yet!")

    if args.debug:
        plot_simple(xdata,
                    ydata,
                    title="{}\nCenter={} Radius={}".format(
                        args.Fourier_file, args.index, args.radius),
                    xlabel="Fourier radius")

    # Save power spectra to outfile
    caption = "Fourier_radius mean std"
    data = np.concatenate(([xdata], [ydata], [edata]), axis=0).transpose()
    np.savetxt(args.outfile, data, header=caption)
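
The azimuthal averaging above groups pixels by exact floating-point radius values. A shorter and usually faster alternative bins by integer radius with np.bincount; a sketch for an unmasked image (azimuthal_average is a hypothetical helper and not pixel-for-pixel equivalent to the exact-radius grouping above):

import numpy as np

def azimuthal_average(image, center=None):
    ny, nx = image.shape
    if center is None:
        center = (ny / 2, nx / 2)
    yy, xx = np.mgrid[:ny, :nx]
    radius = np.sqrt(np.square(yy - center[0]) +
                     np.square(xx - center[1])).astype(int)
    # Sum pixel values and pixel counts per integer radius bin
    sums = np.bincount(radius.ravel(), weights=image.ravel())
    counts = np.bincount(radius.ravel())
    return sums / counts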
Example #13
 def test_call(self):
     target = Target(band='H', star_table=self.star_table, sky_background=13.)
     photon_rate_density = target.get_photon_rate_density(field_of_view=30 * u.arcsec, resolution=.5 * u.arcsec, dither=(1., 0.5))
     imshow(photon_rate_density, title='photon_rate_density')
Example #14
    def extract_psfs(self,
                     file_shifts=None,
                     mode='median',
                     align=True,
                     debug=False):
        """Extract the PSF of the list of ReferenceStars frame by frame.

        The PSF of each frame is estimated by combining the aperture frames of all reference stars, after
        normalization and optional sub-pixel alignment, using the requested combination mode.

        Args:
            file_shifts (list, optional):
                List of frame shifts for each of the files with respect to the reference file. These will be used to
                adapt the reference star positions. Default is None.
            mode (str, optional):
                Combination mode for PSFs from different apertures; one of 'median', 'mean' or 'weighted_mean'.
                Default is 'median'.
            align (bool, optional):
                Execute sub-pixel alignments of apertures. Default is True.
            debug (bool, optional):
                Shows the (integrated) apertures if set to True. Default is False.

        Returns:
            psf_files (list):
                List of file names where the PSFs are stored.
        """

        # Input parameters
        if mode == 'median':
            func = np.median
        elif mode == 'mean':
            func = np.mean
        elif mode == 'weighted_mean':
            func = weighted_mean
        else:
            raise ValueError(
                'ReferenceStars received unknown mode for extract method ({}).'
                .format(mode))

        # Create a list of psf files and store it to params
        psf_files = []

        # Iterate over input files
        for file_index, file in enumerate(self.in_files):
            # Initialize file by file
            logger.info("Extracting PSFs from file {}".format(file))
            psf_file = PSFFile(file,
                               out_dir=self.save_dir,
                               frame_shape=(self.box_size, self.box_size),
                               in_dir=self.in_dir,
                               header_card_prefix="HIERARCH SPECKLEPY ")
            psf_files.append(psf_file.file_path)

            # Consider alignment of cubes when initializing the apertures, i.e.
            # the position of the aperture in the shifted cube
            if file_shifts is None:
                apertures = self.init_apertures(file)
            else:
                apertures = self.init_apertures(file,
                                                shift=file_shifts[file_index])

            # Extract the number of frames in the FITS file from the header
            frame_number = fits.getheader(os.path.join(self.in_dir,
                                                       file))['NAXIS3']

            # Check apertures visually
            if debug:
                for index, aperture in enumerate(apertures):
                    imshow(aperture.get_integrated(),
                           title="Inspect reference aperture {}".format(index +
                                                                        1))

            # Extract the PSF by combining the aperture frames in the desired mode
            for frame_index in trange(frame_number,
                                      desc="Extracting PSF frame"):
                psfs = np.empty((len(apertures), self.box_size, self.box_size))
                vars = np.ones((len(apertures), self.box_size, self.box_size))
                for aperture_index, aperture in enumerate(apertures):

                    flux = aperture[frame_index]
                    var = aperture.vars

                    if align:
                        flux = ndimage.shift(flux,
                                             shift=(aperture.xoffset,
                                                    aperture.yoffset))
                        var = ndimage.shift(var,
                                            shift=(aperture.xoffset,
                                                   aperture.yoffset))

                    # Normalization of each psf to make median estimate sensible
                    psfs[aperture_index] = flux / np.sum(flux)
                    vars[aperture_index] = var / np.sum(flux)

                if mode != 'weighted_mean':
                    psf = func(psfs, axis=0)
                else:
                    psf, var = weighted_mean(psfs, axis=0, vars=vars)

                psf_file.update_frame(frame_index, psf)

        return psf_files
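
For reference, the 'weighted_mean' branch above combines the aperture PSFs with an inverse-variance weighted mean. A minimal sketch of such a combination (inverse_variance_mean is a hypothetical helper; specklepy's weighted_mean may differ in signature and return values):

import numpy as np

def inverse_variance_mean(data, vars, axis=0):
    weights = np.divide(1.0, vars)
    mean = np.sum(data * weights, axis=axis) / np.sum(weights, axis=axis)
    # The variance of the weighted mean is the reciprocal of the summed weights
    var = np.divide(1.0, np.sum(weights, axis=axis))
    return mean, var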
Example #15
 def test_init(self):
     z = Zernike()
     imshow(z.init_rho(self.size), title='Radius')
     imshow(z.init_phi(self.size), title='Azimuth')
Example #16
 def test_init_from_vector(self):
     z = Zernike()
     coeffs = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
     # coeffs = np.random.rand((10))
     out = z(coeffs, size=128)
     imshow(out, title='Zernike polynomial {}'.format(coeffs))
Example #17
def holography(params, mode='same', debug=False):
    """Execute the holographic image reconstruction.

    The holographic image reconstruction is an algorithm as outlined, e.g., by Schoedel et al. (2013, Section 3). This
    function follows that algorithm, see comments in the code. Most of the important functions are imported from other
    modules of specklepy.

    Args:
        params (dict):
            Dictionary that carries all important parameters.
        mode (str, optional):
            Define the size of the output image as 'same' to the reference
            image or expanding to include the 'full' covered field. Default is
            'same'.
        debug (bool, optional):
            Set to True to inspect intermediate results.
            Default is False.

    Returns:
        image (np.ndarray): The image reconstruction.
    """

    logger.info(f"Starting holographic reconstruction...")
    file_archive = FileArchive(file_list=params['PATHS']['inDir'],
                               cards=[],
                               dtypes=[])
    in_files = file_archive.files
    in_dir = file_archive.in_dir
    tmp_dir = params['PATHS']['tmpDir']

    # Input check
    if mode not in ['same', 'full', 'valid']:
        raise SpecklepyValueError('holography()',
                                  argname='mode',
                                  argvalue=mode,
                                  expected="either 'same', 'full', or 'valid'")

    if 'apodizationType' in params['APODIZATION']:
        # Catch deprecated parameter name
        logger.warning(
            "Parameter 'apodizationType' is deprecated. Use 'type' instead!")
        params['APODIZATION']['type'] = params['APODIZATION'][
            'apodizationType']
    if 'apodizationWidth' in params['APODIZATION']:
        # Catch deprecated parameter name
        logger.warning(
            "Parameter 'apodizationWidth' is deprecated. Use 'radius' instead!"
        )
        params['APODIZATION']['radius'] = params['APODIZATION'][
            'apodizationWidth']
    if params['APODIZATION']['type'] is None or params['APODIZATION'][
            'type'].lower() not in ['gaussian', 'airy']:
        logger.error(
            f"Apodization type has not been set or has an invalid value ({params['APODIZATION']['type']})"
        )
    if params['APODIZATION']['radius'] is None or not isinstance(
            params['APODIZATION']['radius'], (int, float)):
        logger.error(
            f"Apodization radius has not been set or is of the wrong type ({params['APODIZATION']['radius']})"
        )

    # Initialize the outfile
    out_file = ReconstructionFile(filename=params['PATHS']['outFile'],
                                  files=in_files,
                                  cards={"RECONSTRUCTION": "Holography"},
                                  in_dir=in_dir)

    # Initialize reconstruction
    reconstruction = Reconstruction(
        in_files=in_files,
        mode=mode,
        alignment_method='ssa',
        reference_image=params['PATHS']['alignmentReferenceFile'],
        in_dir=in_dir,
        tmp_dir=tmp_dir,
        out_file=params['PATHS']['outFile'],
        var_ext=params['OPTIONS']['varianceExtensionName'],
        box_indexes=params['OPTIONS']['box_indexes'],
        debug=debug)

    # (i-ii) Align cubes
    # shifts = get_shifts(files=in_files, reference_file=params['PATHS']['alignmentReferenceFile'],
    #                     lazy_mode=True, return_image_shape=False, in_dir=in_dir, debug=debug)
    shifts = reconstruction.shifts

    # (iii) Compute SSA reconstruction
    # image = ssa(in_files, mode=mode, outfile=out_file, in_dir=in_dir, tmp_dir=tmp_dir,
    #             variance_extension_name=params['OPTIONS']['varianceExtensionName'])
    image = reconstruction.coadd_long_exposures()
    if isinstance(image, tuple):
        # SSA returned a reconstruction image and a variance image
        image, image_var = image
    total_flux = np.sum(image)  # Stored for flux conservation

    # Start iteration from steps (iv) through (xi)
    while True:
        # (iv) Astrometry and photometry, i.e. StarFinder
        extract_sources(image=image,
                        fwhm=params['STARFINDER']['starfinderFwhm'],
                        noise_threshold=params['STARFINDER']['noiseThreshold'],
                        background_subtraction=True,
                        write_to=params['PATHS']['allStarsFile'],
                        star_finder='DAO',
                        debug=debug)

        # (v) Select reference stars
        print(
            "\tPlease copy your desired reference stars from the all stars file into the reference star file!"
        )
        input("\tWhen you are done, hit a ENTER.")

        # (vi) PSF extraction
        ref_stars = ReferenceStars(
            psf_radius=params['PSFEXTRACTION']['psfRadius'],
            reference_source_file=params['PATHS']['refSourceFile'],
            in_files=in_files,
            save_dir=tmp_dir,
            in_dir=in_dir,
            field_segmentation=params['PSFEXTRACTION']['fieldSegmentation'])
        if params['PSFEXTRACTION']['mode'].lower() == 'epsf':
            psf_files = ref_stars.extract_epsfs(file_shifts=shifts,
                                                debug=debug)
        elif params['PSFEXTRACTION']['mode'].lower() in [
                'mean', 'median', 'weighted_mean'
        ]:
            psf_files = ref_stars.extract_psfs(
                file_shifts=shifts,
                mode=params['PSFEXTRACTION']['mode'].lower(),
                debug=debug)
        else:
            raise RuntimeError(
                f"PSF extraction mode '{params['PSFEXTRACTION']['mode']}' is not understood!"
            )
        logger.info("Saved the extracted PSFs...")

        # (vii) Noise thresholding
        psf_noise_mask = None
        for file in psf_files:
            with fits.open(file, mode='update') as hdu_list:
                n_frames = hdu_list[0].header['NAXIS3']
                if psf_noise_mask is None:
                    psf_noise_mask = get_noise_mask(
                        hdu_list[0].data[0],
                        noise_reference_margin=params['PSFEXTRACTION']
                        ['noiseReferenceMargin'])
                for index in range(n_frames):
                    reference = np.ma.masked_array(hdu_list[0].data[index],
                                                   mask=psf_noise_mask)
                    background = np.mean(reference)
                    noise = np.std(reference)
                    update = np.maximum(
                        hdu_list[0].data[index] - background -
                        params['PSFEXTRACTION']['noiseThreshold'] * noise, 0.0)
                    if np.sum(update) == 0.0:
                        raise ValueError(
                            "After background subtraction and noise thresholding, no signal is leftover. "
                            "Please reduce the noiseThreshold!")
                    update = update / np.sum(update)  # Flux sum of order unity
                    hdu_list[0].data[index] = update
                    hdu_list.flush()

        # (viii) Subtraction of secondary sources within the reference apertures
        # TODO: Implement Secondary source subtraction

        # (ix) Estimate object, following Eq. 1 (Schoedel et al., 2013)
        f_object = FourierObject(in_files,
                                 psf_files,
                                 shifts=shifts,
                                 mode=mode,
                                 in_dir=in_dir)
        f_object.coadd_fft()

        # (x) Apodization
        f_object.apodize(type=params['APODIZATION']['type'],
                         radius=params['APODIZATION']['radius'])

        # (xi) Inverse Fourier transform to retain the reconstructed image
        image = f_object.ifft(total_flux=total_flux)

        # Inspect the latest reconstruction
        if debug:
            imshow(image)

        # Save the latest reconstruction image to outfile
        out_file.data = image

        # Ask the user whether the iteration shall be continued or not
        answer = input(
            "\tDo you want to continue with one more iteration? [yes/no]\n\t")
        if answer.lower() in ['n', 'no']:
            break

    # Repeat astrometry and photometry, i.e. StarFinder on final image
    extract_sources(image=image,
                    fwhm=params['STARFINDER']['starfinderFwhm'],
                    noise_threshold=params['STARFINDER']['noiseThreshold'],
                    background_subtraction=True,
                    write_to=params['PATHS']['allStarsFile'],
                    star_finder='DAO',
                    debug=debug)

    # Finally return the image
    return image
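
Step (ix) estimates the object in Fourier space. The classical speckle-holography estimator (cf. Eq. 1 of Schoedel et al. 2013, cited in the docstring) divides the PSF-weighted image spectra by the summed PSF power, O = <I P*> / <|P|^2>. A self-contained sketch, assuming matching (n, y, x) arrays of frames and PSFs (fourier_object_estimate is a hypothetical stand-in for the FourierObject class used above):

import numpy as np

def fourier_object_estimate(frames, psfs):
    numerator = np.zeros(frames.shape[1:], dtype=complex)
    denominator = np.zeros(frames.shape[1:])
    for image, psf in zip(frames, psfs):
        f_image = np.fft.fft2(image)
        f_psf = np.fft.fft2(psf)
        numerator += f_image * np.conjugate(f_psf)
        denominator += np.abs(f_psf) ** 2
    # Back to image space; fftshift moves the object to the image center
    return np.real(np.fft.fftshift(np.fft.ifft2(numerator / denominator)))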
Example #18
 def test_call(self):
     imshow(self.test_data)
     test_aperture = Aperture(8, 8, 4, data=self.test_data)
     imshow(test_aperture.data)
     test_aperture = Aperture(64, 64, 16, data=self.test_data_large)
     imshow(test_aperture.data)
Example #19
def get_shift(image,
              reference_image=None,
              is_fourier_transformed=False,
              mode='correlation',
              debug=False):
    """Estimate the shift between an image and a reference image.

    Estimate the relative shift between an image and a reference image by means of a 2D correlation
    ('correlation' mode) or by comparison of the emission peaks ('peak' or 'maximum' modes).

    Args:
        image (np.ndarray):
            2D array of the image to be shifted.
        reference_image (np.ndarray):
            2D array of the reference image, relative to which the shift is estimated.
        is_fourier_transformed (bool):
            Indicate whether the reference image is already Fourier transformed. This is implemented to save
            computation by computing that transform only once.
        mode (str, optional):
            Mode of the shift estimate. In 'correlation' mode, a 2D correlation is used to estimate the shift of the
            array. This is computationally much more expensive than the equivalent 'maximum' and 'peak' modes, which
            simply identify the coordinates of the emission peaks and return the difference. However, these modes may
            be fooled by reference sources of similar brightness. Default is 'correlation'.
        debug (bool, optional):
            Set to True to inspect intermediate results. Default is False.

    Returns:
        shift (tuple):
            Tuple of shift indices for each axis.
    """

    # Check input parameters
    if not isinstance(image, np.ndarray) or image.ndim != 2:
        raise TypeError(
            f"Image input must be a 2D numpy.ndarray, but was provided as {type(image)}"
        )
    if not isinstance(reference_image, np.ndarray) or reference_image.ndim != 2:
        raise TypeError(
            f"Reference image input must be a 2D numpy.ndarray, but was provided as {type(reference_image)}"
        )
    if not isinstance(is_fourier_transformed, bool):
        raise SpecklepyTypeError('get_shift()',
                                 argname='is_fourier_transformed',
                                 argtype=type(is_fourier_transformed),
                                 expected='bool')
    if isinstance(mode, str):
        if mode not in ['correlation', 'maximum', 'peak']:
            raise SpecklepyValueError(
                'get_shift()',
                argname='mode',
                argvalue=mode,
                expected="'correlation', 'maximum' or 'peak'")
    else:
        raise SpecklepyTypeError('get_shift()',
                                 argname='mode',
                                 argtype=type(mode),
                                 expected='str')

    # Simple comparison of the peaks in the images
    if mode == 'maximum' or mode == 'peak':
        peak_image = np.unravel_index(np.argmax(image, axis=None), image.shape)
        peak_ref_image = np.unravel_index(
            np.argmax(reference_image, axis=None), reference_image.shape)
        return (peak_ref_image[0] - peak_image[0],
                peak_ref_image[1] - peak_image[1])

    # Using correlation of the two images
    elif mode == 'correlation':
        # Get the Fourier transformed reference image for cross-correlation
        if not is_fourier_transformed:
            f_reference_image = np.fft.fft2(reference_image)
        else:
            f_reference_image = reference_image

        # Fourier transform the image
        f_image = np.conjugate(np.fft.fft2(image))

        # Compute the 2-dimensional correlation
        correlation = np.fft.ifft2(np.multiply(f_reference_image, f_image))
        correlation = np.fft.fftshift(correlation)
        if debug:
            imshow(np.abs(correlation), title='FFT shifted correlation')

        # Derive the shift from the correlation
        shift = np.unravel_index(np.argmax(correlation), correlation.shape)
        shift = tuple(x - int(correlation.shape[i] / 2)
                      for i, x in enumerate(shift))
        return shift
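
A quick, hypothetical sanity check of the 'correlation' mode: shift a noise image by a known offset and verify that rolling the shifted image by the returned shift recovers the reference (a noise image has a sharp autocorrelation peak, so the estimate is unambiguous):

import numpy as np

rng = np.random.default_rng(seed=1)
reference = rng.random((64, 64))
shifted = np.roll(reference, shift=(3, -5), axis=(0, 1))
result = get_shift(shifted, reference_image=reference)
assert (np.roll(shifted, shift=result, axis=(0, 1)) == reference).all()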
Example #20
 def test_psf(self):
     psf_image = psf(self.aperture)
     imshow(psf_image, title="PSF", norm=LogNorm())
Example #21
    def expose(self,
               photon_rate,
               integration_time,
               photon_rate_resolution,
               debug=False):
        """Compute the number of electrons in every pixel after the exposure.

        Args:
            photon_rate (Quantity):
                Photon rate map incident on the detector.
            integration_time (Quantity):
                Duration of the exposure.
            photon_rate_resolution (Quantity):
                Angular resolution of the photon_rate array, used for resampling this to the detector grid.
            debug (bool, optional):
                Set True for debugging. Default is False.

        Returns:
            electrons (Quantity):
                Number of electrons in every pixel after the exposure.
        """

        # Input parameters
        if isinstance(photon_rate, (int, float)):
            logger.warning(
                f"Interpreting scalar type photon_rate as {photon_rate} ph / s"
            )
            photon_rate = photon_rate * Unit('ph / s')
        elif not isinstance(photon_rate, Quantity):
            raise SpecklepyTypeError('expose', 'photon_rate',
                                     type(photon_rate), 'Quantity')

        if isinstance(integration_time, (int, float)):
            logger.warning(
                f"Interpreting scalar type integration_time as {integration_time} s"
            )
            integration_time = integration_time * Unit('s')
        elif not isinstance(integration_time, Quantity):
            raise SpecklepyTypeError('expose', 'integration_time',
                                     type(integration_time), 'Quantity')

        if isinstance(photon_rate_resolution, (int, float)):
            logger.warning(
                f"Interpreting scalar type photon_rate_resolution as {photon_rate_resolution} arcsec"
            )
            photon_rate_resolution = photon_rate_resolution * Unit('arcsec')
        elif not isinstance(photon_rate_resolution, Quantity):
            raise SpecklepyTypeError('expose', 'photon_rate_resolution',
                                     type(photon_rate_resolution), 'Quantity')

        # Resample the photon rate to the detector resolution
        photon_rate = self.resample(
            photon_rate=photon_rate,
            photon_rate_resolution=photon_rate_resolution)
        photons = photon_rate * integration_time
        if debug:
            imshow(photons, title='photons')

        # Compute photon shot noise with Poisson statistics
        photons = np.random.poisson(photons.value) * photons.unit

        # Incorporate efficiencies
        if self.optics_transmission is not None:
            photons = photons * self.optics_transmission
        electrons = photons * self.quantum_efficiency
        if debug:
            imshow(electrons, title='electrons')

        # Limit to the saturation level of the detector
        if self.saturation_level is not None:
            electrons = np.minimum(
                electrons, self.saturation_level)  # * self.system_gain)
        electrons = np.round(electrons)
        self.array = electrons
        return electrons
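
A minimal sketch of the exposure core above, with assumed numbers for the quantum efficiency and saturation level (illustrative values, not the class defaults):

import numpy as np
from astropy.units import Unit

photon_rate = np.full((16, 16), 100.0) * Unit('ph / s')    # uniform source
photons = photon_rate * (1.0 * Unit('s'))                  # rate times integration time
photons = np.random.poisson(photons.value) * photons.unit  # Poisson shot noise
electrons = photons * 0.9                                  # assumed quantum efficiency
electrons = np.round(np.minimum(electrons, 60000 * electrons.unit))  # assumed saturation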