Example #1
	def model_psf(self, model, radius, psf_resolution, shape=256, **kwargs):
		"""Models the PSF given the desired model function and kwargs.

		Args:
			model (str):
				Must be either 'airydisk' or 'gaussian'.
			radius (int, float, or astropy.units.Quantity):
				Radius of the PSF model, i.e. the radius of the first zero of an AiryDisk model or the standard
				deviation of the Gaussian model. Scalar values will be interpreted in units of arcseconds.
			psf_resolution (int, float, or astropy.units.Quantity):
				Resolution of the model PSF, equivalent to the pixel scale of the array. Scalar values will be
				interpreted in units of arcseconds.
			shape (int or tuple, optional):
				Size of the model PSF along both axes. An integer creates a square array.
			kwargs are forwarded to the model function.
		"""

		# Check input parameters
		if not isinstance(model, str):
			raise SpecklepyTypeError('model_psf', 'model', type(model), 'str')

		if isinstance(radius, Quantity):
			self.radius = radius
		elif isinstance(radius, (int, float)):
			logger.warning(f"Interpreting scalar type radius as {radius} arcsec")
			self.radius = Quantity(f"{radius} arcsec")
		elif isinstance(radius, str):
			self.radius = Quantity(radius)
		else:
			raise SpecklepyTypeError('model_psf', 'radius', type(radius), 'Quantity')

		if isinstance(psf_resolution, Quantity):
			self.psf_resolution = psf_resolution
		elif isinstance(psf_resolution, (int, float)):
			logger.warning(f"Interpreting scalar type psf_resolution as {psf_resolution} arcsec")
			self.psf_resolution = Quantity(f"{psf_resolution} arcsec")
		elif isinstance(psf_resolution, str):
			self.psf_resolution = Quantity(psf_resolution)
		else:
			raise SpecklepyTypeError('model_psf', 'psf_resolution', type(psf_resolution), 'Quantity')

		if isinstance(shape, int):
			center = (shape / 2, shape / 2)
			shape = (shape, shape)
		elif isinstance(shape, tuple):
			center = (shape[0] / 2, shape[1] / 2)
		else:
			raise SpecklepyTypeError('model_psf', 'shape', type(shape), 'int or tuple')

		if model.lower() == 'airydisk':
			model = models.AiryDisk2D(x_0=center[0], y_0=center[1], radius=float(self.radius / self.psf_resolution))
		elif model.lower() == 'gaussian':
			stddev = float(self.radius / self.psf_resolution)
			model = models.Gaussian2D(x_mean=center[0], y_mean=center[1], x_stddev=stddev, y_stddev=stddev)
		else:
			raise SpecklepyValueError('model_psf', 'model', model, 'either AiryDisk or Gaussian')

		y, x = np.mgrid[0:shape[0], 0:shape[1]]
		self.psf = model(x, y)
		self.psf = self.normalize(self.psf)
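
A standalone sketch of the Gaussian branch above, using hypothetical values; passing astropy quantities for radius and psf_resolution avoids the scalar-interpretation warnings:

import numpy as np
from astropy.modeling import models
from astropy.units import Quantity

radius = Quantity('0.1 arcsec')           # Gaussian standard deviation on sky
psf_resolution = Quantity('0.02 arcsec')  # pixel scale of the model PSF
shape = (128, 128)
center = (shape[0] / 2, shape[1] / 2)

stddev = float(radius / psf_resolution)   # standard deviation in pixels
model = models.Gaussian2D(x_mean=center[0], y_mean=center[1],
                          x_stddev=stddev, y_stddev=stddev)
y, x = np.mgrid[0:shape[0], 0:shape[1]]
psf = model(x, y)
psf /= np.sum(psf)                        # normalize to unit total flux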
Example #2
	def get_header_quantity(header, key, aliases=None):
		"""Extract unit quantities from FITS headers, based on the unit in the comment.

		Args:
			header:
				Header of the FITS file.
			key (str):
				Key or name of the FITS header card.
			aliases (dict, optional):
				The aliases dictionary maps unit strings such that they are understood by Unit(str).

		Returns:
			quantity (Quantity):
				Quantity derived from the combination of the FITS header card value and comment.
		"""

		# Read header card
		value = header[key]
		comment = header.comments[key]

		# Handle empty comment
		if not comment:
			logger.warning("Function 'get_value()' received an empty comment string and returns scalar value.")
			return value

		# Apply fall back values
		if aliases is None:
			aliases = {'sec': 's', 'milliarcsec': 'mas', 'microns': 'micron'}

		# Replace comment entries that are not understood by astropy.units by corresponding aliases
		if comment in aliases:
			comment = aliases[comment]

		return value * Unit(comment)
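
A minimal usage sketch, assuming the unit string sits in the card comment as the function expects; the header is built in memory with astropy:

from astropy.io import fits
from astropy.units import Unit

header = fits.Header()
header['EXPTIME'] = (2.0, 'sec')  # value in the card, unit string in the comment

value = header['EXPTIME']
comment = header.comments['EXPTIME']
aliases = {'sec': 's', 'milliarcsec': 'mas', 'microns': 'micron'}
comment = aliases.get(comment, comment)  # map alias to an astropy-readable unit
quantity = value * Unit(comment)         # Quantity of 2.0 s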
Example #3
	def __init__(self, diameter, psf_source, central_obscuration=None, psf_frame=0, **kwargs):
		"""Instantiate Telescope class:

		Args:
			diameter (float or astropy.units.Quantity):
				Telescope diameter, used to compute the light collecting area.
			psf_source (str):
				File name to read PSFs from, or a model name. Models can be either 'AiryDisk' or 'Gaussian'; model
				arguments are forwarded via kwargs.
			central_obscuration (float, optional):
				Radial fraction of the telescope aperture that is obscured by the secondary mirror.
			psf_frame (int, optional):
				Index of the first frame to read from psf_source.
			kwargs:
				Are forwarded to the psf_source model.
		"""

		# Input parameters
		if isinstance(diameter, Quantity):
			self.diameter = diameter
		elif isinstance(diameter, (int, float)):
			logger.warning(f"Interpreting scalar type diameter as {diameter} m")
			self.diameter = Quantity(f"{diameter} m")
		elif isinstance(diameter, str):
			self.diameter = Quantity(diameter)
		else:
			raise SpecklepyTypeError('Telescope', 'diameter', type(diameter), 'Quantity')

		if isinstance(psf_source, str):
			self.psf_source = psf_source
		else:
			raise SpecklepyTypeError('Telescope', 'psf_source', type(psf_source), 'str')

		if isinstance(central_obscuration, float) or central_obscuration is None:
			self.central_obscuration = central_obscuration
		elif isinstance(central_obscuration, str):
			self.central_obscuration = float(central_obscuration)
		else:
			raise SpecklepyTypeError('Telescope', 'central_obscuration', type(central_obscuration), 'float')

		if isinstance(psf_frame, int):
			self.psf_frame = psf_frame
		else:
			raise SpecklepyTypeError('Telescope', 'psf_frame', type(psf_frame), 'int')

		# Derive secondary parameters
		if self.central_obscuration is not None:
			self.area = (1. - self.central_obscuration**2) * np.pi * (self.diameter / 2)**2
		else:
			self.area = np.pi * (self.diameter / 2)**2

		if psf_source.lower() in ['airydisk', 'gaussian']:
			self.model_psf(psf_source, **kwargs)
		else:
			psf, time_resolution, angular_resolution = self.read_psf_file(psf_source)
			self.psf = psf
			self.psf_resolution = angular_resolution
			if time_resolution is not None:
				self.timestep = time_resolution
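
A quick numeric sketch of the collecting-area formula above, with illustrative values (8.2 m aperture, 14% central obscuration):

import numpy as np
from astropy.units import Quantity

diameter = Quantity('8.2 m')
central_obscuration = 0.14
area = (1. - central_obscuration**2) * np.pi * (diameter / 2)**2
print(area.to('m2'))  # roughly 51.8 m2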
Example #4
    def get_psf_variance(self):
        """Extract a radial variance profile of the speckle PSFs."""

        # Check data shape
        if self.data.ndim != 3:
            logger.warning(
                f"Aperture data need to be 3D but are {self.data.ndim}-dimensional!"
            )
            raise SystemExit("Aborting...")

        # Initialize output radii and array
        radius_map = self.make_radius_map()
        rdata = np.unique(radius_map)
        ydata = np.zeros(rdata.shape)
        edata = np.zeros(rdata.shape)

        # Extract 2D variance map
        var_map = np.var(self.data, axis=0)

        # Iterate over aperture radii
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            for index, radius in enumerate(rdata):
                subset = var_map[np.where(radius_map == radius)]
                ydata[index] = np.mean(subset)
                edata[index] = np.std(subset)  # scatter among pixels at this radius as the error estimate

        return rdata, ydata, edata
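
A self-contained sketch of the same radial-profile idea, with a simple integer radius map standing in for make_radius_map and random data in place of the variance map:

import numpy as np

size = 32
y, x = np.indices((size, size))
radius_map = np.hypot(x - size // 2, y - size // 2).round().astype(int)
var_map = np.random.rand(size, size)  # stand-in for np.var(self.data, axis=0)

rdata = np.unique(radius_map)
ydata = np.array([var_map[radius_map == r].mean() for r in rdata])
edata = np.array([var_map[radius_map == r].std() for r in rdata])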
Example #5
    def apodize(self, type, radius, crop=False):
        """Apodize the Fourier object with a Gaussian or Airy disk kernel.

        Args:
            type (str):
                Type of the apodization. Can be either `Gaussian` or `Airy`. See specklepy.core.psfmodel for details.
            radius (float):
                Radius of the apodization kernel. This is the standard deviation of a Gaussian kernel or the radius of
                first zero in the case of an Airy function.
            crop (bool, optional):
                Crop corners of the PSF and set them to zero.

        Returns:
            apodized (np.array, dtype=np.complex128):
                Apodized Fourier-plane image.
        """

        # Assert image shape
        if self.fourier_image.shape[0] != self.fourier_image.shape[1]:
            logger.warning(
                "The apodization is applied to a non-square input image. This may cause some "
                "unpredictable results!")

        logger.info("Apodizing the object...")
        if type is None and radius is None:
            logger.warning(
                f"Apodization is skipped for either type or radius not being defined!"
            )
            return self.fourier_image

        # Interpret function input and compute apodization PSF
        psf_model = PSFModel(type=type, radius=radius)
        apodization_psf = psf_model(self.fourier_image.shape)

        # Crop corners of the PSF
        if crop:
            threshold = apodization_psf[0, int(psf_model.center[1])]
            apodization_psf -= threshold
            apodization_psf = np.maximum(apodization_psf, 0.0)

        # Normalize to unity
        apodization_psf /= np.sum(apodization_psf)

        # Transform into Fourier space
        apodization_otf = otf(apodization_psf)
        self.fourier_image = np.multiply(self.fourier_image, apodization_otf)

        return self.fourier_image
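
The otf helper is imported from elsewhere in specklepy and not shown here; a common definition, sketched below as an assumption rather than specklepy's exact implementation, is the centered Fourier transform of the PSF:

import numpy as np

def otf_sketch(psf):
    # Assumed OTF: FFT of the PSF with the zero frequency shifted to the center
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(psf)))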
Example #6
File: plots.py Project: deepin00/specklepy
def maximize_plot():
    mng = plt.get_current_fig_manager()
    backend = mpl.get_backend()
    if backend == 'Qt4Agg' or backend == 'Qt5Agg':
        mng.window.showMaximized()
    elif backend == 'wxAgg':
        mng.frame.Maximize(True)
    elif backend == 'TkAgg':
        try:
            mng.window.state('zoomed')
        except Exception:
            logger.warning("Could not maximize plot")
    else:
        raise RuntimeWarning(
            "Maximizing plot is not possible with matplotlib backend {}".
            format(backend))
Example #7
	def integrate_psf(self, integration_time, hdu=None):
		"""Integrates psf frames over the input time.

		Args:
			integration_time (Quantity):
				This is used for computing the number of frames 'n_frames', via floor division by the 'timestep'
				attribute.
			hdu (int, str, optional):
				Specifier of the HDU. Default is None for the first HDU.
		"""

		# Check input parameters
		if isinstance(integration_time, (int, float)):
			logger.warning(f"Interpreting scalar type integration_time as {integration_time} s")
			integration_time = integration_time * Unit('s')
		elif not isinstance(integration_time, Quantity):
			raise SpecklepyTypeError('integrate_psf', 'integration_time', type(integration_time), 'Quantity')

		if integration_time < self.timestep:
			raise ValueError(f"integrate_psf received integration time {integration_time} shorter than the time "
							 f"resolution of the psf source ({self.timestep})!")

		n_frames = int(integration_time / self.timestep)

		# Read PSF frames from source file
		data = fits.getdata(self.psf_source, hdu)

		self.psf_frame += 1
		if self.psf_frame + n_frames < data.shape[0]:
			self.psf = np.sum(data[self.psf_frame : self.psf_frame+n_frames], axis=0)
		else:
			self.psf = np.sum(data[self.psf_frame : ], axis=0)
			self.psf += np.sum(data[ : (self.psf_frame+n_frames) % data.shape[0]], axis=0)
		self.psf_frame += n_frames - 1
		self.psf_frame = self.psf_frame % data.shape[0]

		# Normalize the integrated PSF
		self.psf = self.normalize(self.psf)
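
The frame bookkeeping above wraps around the end of the PSF cube. A toy standalone example of the same slicing logic, using a 10-frame 'cube' of frame indices:

import numpy as np

data = np.arange(10)        # stands in for a cube with 10 PSF frames
psf_frame, n_frames = 8, 4  # start after frame 8, integrate 4 frames

psf_frame += 1
if psf_frame + n_frames < data.shape[0]:
    total = np.sum(data[psf_frame:psf_frame + n_frames])
else:
    total = np.sum(data[psf_frame:])
    total += np.sum(data[:(psf_frame + n_frames) % data.shape[0]])
psf_frame = (psf_frame + n_frames - 1) % data.shape[0]
print(total, psf_frame)     # frames 9, 0, 1, 2 are summed; next start index is 2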
Example #8
    def expose(self,
               photon_rate,
               integration_time,
               photon_rate_resolution,
               debug=False):
        """Compute the number of electrons in every pixel after the exposure.

        Args:
            photon_rate (Quantity):
                Photon rate per pixel; resampled internally to the detector grid.
            integration_time (Quantity):
                Exposure time, multiplied with the photon rate to obtain photon counts.
            photon_rate_resolution (Quantity):
                Angular resolution of the photon_rate array, used for resampling this to the detector grid.
            debug (bool, optional):
                Set True for debugging. Default is False.

        Returns:
            electrons (Quantity):
                Electron count per pixel after the exposure.
        """

        # Input parameters
        if isinstance(photon_rate, (int, float)):
            logger.warning(
                f"Interpreting scalar type photon_rate as {photon_rate} photon/ s"
            )
            photon_rate = photon_rate * Unit('ph / s')
        elif not isinstance(photon_rate, Quantity):
            raise SpecklepyTypeError('expose', 'photon_rate',
                                     type(photon_rate), 'Quantity')

        if isinstance(integration_time, (int, float)):
            logger.warning(
                f"Interpreting scalar type integration_time as {integration_time} s"
            )
            integration_time = integration_time * Unit('s')
        elif not isinstance(integration_time, Quantity):
            raise SpecklepyTypeError('expose', 'integration_time',
                                     type(integration_time), 'Quantity')

        if isinstance(photon_rate_resolution, (int, float)):
            logger.warning(
                f"Interpreting scalar type photon_rate_resolution as {photon_rate_resolution} arcsec"
            )
            photon_rate_resolution = photon_rate_resolution * Unit('arcsec')
        elif not isinstance(photon_rate_resolution, Quantity):
            raise SpecklepyTypeError('expose', 'photon_rate_resolution',
                                     type(photon_rate_resolution), 'Quantity')

        # Resample the photon rate to the detector resolution
        photon_rate = self.resample(
            photon_rate=photon_rate,
            photon_rate_resolution=photon_rate_resolution)
        photons = photon_rate * integration_time
        if debug:
            imshow(photons, title='photons')

        # Compute photon shot noise with Poisson statistics
        photons = np.random.poisson(photons.value) * photons.unit

        # Incorporate efficiencies
        if self.optics_transmission is not None:
            photons = photons * self.optics_transmission
        electrons = photons * self.quantum_efficiency
        if debug:
            imshow(electrons, title='electrons')

        # Limit to the saturation level of the detector
        if self.saturation_level is not None:
            electrons = np.minimum(
                electrons, self.saturation_level)  # * self.system_gain)
        electrons = np.round(electrons)
        self.array = electrons
        return electrons
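
A minimal sketch of the shot-noise and efficiency steps in isolation, with hypothetical values; the unit handling mirrors the method above:

import numpy as np
from astropy.units import Unit

photons = np.full((4, 4), 100.0) * Unit('ph')              # expected photon counts
photons = np.random.poisson(photons.value) * photons.unit  # Poisson shot noise
electrons = photons * (0.9 * Unit('electron / ph'))        # quantum efficiency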
Example #9
	def get_photon_rate(self, photon_rate_density, photon_rate_density_resolution=None, integration_time=None,
						debug=False):
		"""Propagates the 'photon_rate_density' array through the telescope.

		The photon_rate_density is multiplied by the telescope collecting area and then
		convolved with the PSF. If the resolution of the flux array differs from the
		telescope's psf_resolution, then one of the two is resampled. If the PSF is
		non-static, it will be integrated over the 'integration_time' value.

		Args:
			photon_rate_density (np.ndarray, dtype=Quantity):
				Photon rate density array to propagate through the telescope.
			photon_rate_density_resolution (Quantity, optional):
				Angular resolution of the photon_rate_density array.
			integration_time (Quantity, optional):
				Required only if the PSF is non-static.
			debug (bool, optional):
				Show additional information for debugging. Default is False.

		Returns:
			photon_rate (Quantity): PSF-convolved photon rate array.
		"""

		# Input parameters
		if not isinstance(photon_rate_density, Quantity):
			raise SpecklepyTypeError('get_photon_rate', 'photon_rate_density', type(photon_rate_density), 'Quantity')

		if photon_rate_density_resolution is not None:
			if not isinstance(photon_rate_density_resolution, Quantity):
				raise SpecklepyTypeError('get_photon_rate', 'photon_rate_density_resolution',
										 type(photon_rate_density_resolution), 'Quantity')
			psf_resample_mode = True
		else:
			psf_resample_mode = False

		if integration_time is None:
			if hasattr(self, 'timestep'):
				raise ValueError("If the PSF source of Telescope is non-static, the call function requires the "
								 "integration_time.")
		elif isinstance(integration_time, (int, float)):
			logger.warning(f"Interpreting scalar type integration_time as {integration_time} s")
			integration_time = Quantity(f"{integration_time} s")
		elif not isinstance(integration_time, Quantity):
			raise SpecklepyTypeError('get_photon_rate', 'integration_time', type(integration_time), 'Quantity')

		# Apply telescope collecting area
		photon_rate = photon_rate_density * self.area
		total_flux = np.sum(photon_rate)
		photon_rate_unit = photon_rate.unit

		# Prepare PSF if non-static
		if hasattr(self, 'timestep'):
			self.integrate_psf(integration_time=integration_time)

		# Resample photon_rate_density to psf resolution
		if psf_resample_mode:

			# Compute the zoom factor
			ratio = float(photon_rate_density_resolution / self.psf_resolution)
			# try:
			# 	ratio = float(photon_rate_density_resolution / self.psf_resolution)
			# except UnitConversionError as e:
			# 	raise UnitConversionError(f"The resolution values of the image ({photon_rate_density_resolution}) and "
			# 							  f"PSF ({self.psf_resolution}) have different units!")

			# Zoom either the image or the PSF, depending on the zoom factor
			with warnings.catch_warnings():
				warnings.simplefilter('ignore')
				if ratio < 1.0:
					self.psf = zoom(self.psf, 1/ratio, order=1) / ratio**2
					self.psf = self.normalize(self.psf)
				else:
					memory_sum = np.sum(photon_rate)
					photon_rate = zoom(photon_rate, ratio, order=1) / ratio**2
					photon_rate = photon_rate / np.sum(photon_rate) * memory_sum

		# Convolve the array with the PSF
		convolved = fftconvolve(photon_rate, self.psf, mode='same') * photon_rate_unit

		# Report on flux conservation
		logger.debug(f"Flux conservation during convolution: {total_flux} > {np.sum(convolved)}")

		return convolved.decompose()
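
A standalone sketch of the flux-conserving resampling branch above (the ratio > 1 case), applied to a random image:

import numpy as np
from scipy.ndimage import zoom

image = np.random.rand(16, 16)
ratio = 2.0
memory_sum = np.sum(image)
resampled = zoom(image, ratio, order=1) / ratio**2       # rescale pixel values for the finer grid
resampled = resampled / np.sum(resampled) * memory_sum   # enforce exact flux conservation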
Example #10
def subtract_sky_background(in_files,
                            out_files=None,
                            method='scalar',
                            source='sky',
                            mask_sources=False,
                            file_path=None,
                            tmp_dir=None,
                            show=False,
                            debug=False):
    """Estimate and subtract the sky background via different methods and sources.

    TODO: Implement sky subtraction from image

    Args:
        in_files (specklepy.FileArchive):
            File archive storing the information of all the files in the reduction.
        out_files (list):
            List of files to apply the sky subtraction to. If left empty, the list stored in the `in_files` FileArchive
            is used.
        method (str, optional):
            Switch between a scalar (`scalar`) background value or a 2D image (`image`).
        source (str, optional):
            Source for estimating the background from. Can be either `sky` to measure from dedicated sky frames or
            `science` to use the science frames themselves. Typically, these frames have a high number of sources, so
            `mask_sources` should be switched on.
        mask_sources (bool, optional):
            In empty reference fields, this masking option should stay at `False`, since source masking is not well
            tested. However, masking sources yields a more precise result.
        file_path (str, optional):
            Path to the files, listed in `in_files`.
        tmp_dir (str, optional):
            Directory to which temporary results and QA data are stored.
        show (bool, optional):
            Show plots of sky estimates for each sequence. They will be created and stored regardless of this choice.
        debug (bool, optional):
            Show debugging information.
    """

    # Set logging level
    if debug:
        logger.setLevel('DEBUG')

    # Apply fall back values
    if method is None:
        method = 'scalar'
    logger.info(f"Sky background subtraction method: {method}")
    if source is None:
        source = 'sky'
    logger.info(f"Sky background subtraction source: {source}")
    if out_files is None:
        out_files = in_files.product_files
    if out_files is None:
        logger.warning(
            "Output files are not declared in subtract_sky_background!")

    # Identify the observing sequences
    sequences = in_files.identify_sequences(source=source)

    # Start the background estimates
    if method == 'scalar':

        # Iterate through observing sequences
        for s, sequence in enumerate(sequences):

            logger.info(
                f"Starting observing sequence {s} :: Object {sequence.object} :: Setup {sequence.setup}"
            )

            # Compute weights based on the time offset to the individual sky observations
            weights = sequence.compute_weights()

            # Start extracting sky fluxes
            sky_bkg = np.zeros(sequence.n_sky)
            sky_bkg_std = np.zeros(sequence.n_sky)
            for i in trange(sequence.n_sky,
                            desc='Estimate sky background from cube'):
                file = sequence.sky_files[i]
                bkg, d_bkg = estimate_sky_background(file,
                                                     method=method,
                                                     mask_sources=mask_sources,
                                                     path=file_path)
                sky_bkg[i] = bkg
                sky_bkg_std[i] = d_bkg
            logger.debug(
                f"Shapes:\nF: {sky_bkg.shape}\ndF: {sky_bkg_std.shape}")

            # Compute weighted sky background for each science file
            weighted_sky_bkg = np.dot(weights, sky_bkg)
            weighted_sky_bkg_var = np.dot(np.square(weights),
                                          np.square(sky_bkg_std))

            # Store sky background estimates
            sky_bkg_table = Table(data=[
                sequence.sky_files, weighted_sky_bkg, weighted_sky_bkg_var
            ],
                                  names=['FILE', 'BKG', 'VAR'])
            sky_bkg_table_name = f"sky_bkg_{sequence.object}_{sequence.setup}.fits"
            sky_bkg_table.write(os.path.join(tmp_dir, sky_bkg_table_name),
                                overwrite=True)

            # Plot sky flux estimates
            for i, file in enumerate(sequence.sky_files):
                plt.text(sequence.sky_time_stamps[i],
                         sky_bkg[i],
                         file,
                         rotation=90,
                         alpha=.5)
            for i, file in enumerate(sequence.science_files):
                plt.text(sequence.science_time_stamps[i],
                         weighted_sky_bkg[i],
                         file,
                         rotation=90,
                         alpha=.66)
            plt.errorbar(x=sequence.sky_time_stamps,
                         y=sky_bkg,
                         yerr=sky_bkg_std,
                         fmt='None',
                         ecolor='tab:blue',
                         alpha=.5)
            plt.plot(sequence.sky_time_stamps,
                     sky_bkg,
                     'D',
                     label='Sky',
                     c='tab:blue')
            plt.errorbar(x=sequence.science_time_stamps,
                         y=weighted_sky_bkg,
                         yerr=np.sqrt(weighted_sky_bkg_var),
                         fmt='None',
                         ecolor='tab:orange',
                         alpha=.66)
            plt.plot(sequence.science_time_stamps,
                     weighted_sky_bkg,
                     'D',
                     label='Science',
                     c='tab:orange')
            plt.xlabel('Time (s)')
            plt.ylabel('Flux (counts)')
            plt.legend()
            save_figure(
                os.path.join(tmp_dir,
                             sky_bkg_table_name.replace('.fits', '.png')))
            if show:
                plt.show()
            plt.close()

            # Subtract sky from product files
            for i, science_file in enumerate(sequence.science_files):
                for out_file in out_files:
                    if science_file in out_file:
                        science_file = out_file
                logger.info(
                    f"Applying sky background subtraction on file {science_file}"
                )
                with fits.open(science_file, mode='update') as hdu_list:
                    hdu_list[0].data = hdu_list[0].data.astype(
                        float) - weighted_sky_bkg[i]
                    if 'VAR' in hdu_list:
                        hdu_list['VAR'].data = hdu_list[
                            'VAR'].data + weighted_sky_bkg_var[i]
                    else:
                        # Construct new HDU
                        shape = np.array(hdu_list[0].data.shape)[[-2, -1]]
                        data = np.full(shape=shape,
                                       fill_value=weighted_sky_bkg_var[i])
                        hdu = fits.ImageHDU(data=data, name='VAR')
                        hdu_list.append(hdu)
                    hdu_list[0].header.set('SKYCORR', str(datetime.now()))
                    hdu_list[0].header.set('SKYBKG', weighted_sky_bkg[i],
                                           "Sky background")
                    hdu_list[0].header.set('SKYVAR', weighted_sky_bkg_var[i],
                                           "Sky background variance")
                    hdu_list.flush()

    elif method in ['image', 'frame']:
        raise NotImplementedError(
            "Sky subtraction in image mode is not implemented yet!")

    else:
        raise ValueError(f"Sky subtraction method {method} is not understood!")
Example #11
    def get_photon_rate_density(self, field_of_view, resolution, dither=None):
        """Creates an image of the field of view.

        Args:
            field_of_view (Quantity or tuple, dtype=Quantity):
                Size of the field of view that is covered by the output image.
            resolution (Quantity):
                Resolution of the image. Optimally, set it to Telescope.psf_resolution to avoid resampling the image.
            dither (tuple, optional):
                Dither position, relative to the (0, 0) standard phase center.

        Returns:
            photon_rate_density (Quantity):
                2D image of the photon rate density towards the standard phase center or dithered position.
        """

        # Input parameters
        if isinstance(resolution, (int, float)):
            logger.warning(
                f"Interpreting scalar type resolution as {resolution} arcsec")
            resolution = resolution * Unit('arcsec')
        elif isinstance(resolution, Quantity):
            pass
        else:
            raise SpecklepyTypeError('get_photon_rate_density', 'resolution',
                                     type(resolution), 'Quantity')
        self.resolution = resolution

        if isinstance(field_of_view, (int, float)):
            logger.warning(
                f"Interpreting scalar type field_of_view as {field_of_view} arcsec")
            field_of_view = field_of_view * Unit('arcsec')
        elif isinstance(field_of_view, (tuple, list, Quantity)):
            pass
        else:
            raise SpecklepyTypeError('get_photon_rate_density',
                                     'field_of_view', type(field_of_view),
                                     'tuple')

        # Add 10% FoV to avoid dark margins
        self.field_of_view = ScaledTuple(field_of_view,
                                         scale=resolution,
                                         scaled=True)
        self.field_of_view *= 1.1

        if dither is None:
            phase_center = (0, 0)
        elif isinstance(dither, (tuple, list)):
            if not isinstance(dither[0], (int, float)):
                raise TypeError(
                    "Dithers should be provided as int or float. These are then interpreted as arcseconds."
                )
            else:
                phase_center = dither
        else:
            raise SpecklepyTypeError('get_photon_rate_density', 'dither',
                                     type(dither), 'tuple')

        # Define image center for centering star positions around the image center
        center = copy(self.field_of_view) / 2

        # Create array with sky background flux
        self.flux_per_pixel = (self.sky_background_flux *
                               self.resolution**2).decompose()
        photon_rate_density = np.ones(
            shape=self.field_of_view.index) * self.flux_per_pixel

        # Add stars from star_table to photon_rate_density
        for row in self.stars:
            position = Position(row['x'],
                                row['y'],
                                scale=self.resolution.to('arcsec').value,
                                scaled=True)
            position.offset(center)
            position.offset(phase_center, scaled=True)
            flux = row['flux']
            try:
                photon_rate_density.value[position.index] = np.maximum(
                    photon_rate_density.value[position.index], flux)
            except IndexError:
                # Star is placed outside the field of view
                pass

        return photon_rate_density
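
A reduced sketch of the star-placement loop above: fluxes are written into a background grid, keeping the brighter value where a star lands on an occupied pixel and skipping positions outside the array:

import numpy as np

image = np.full((8, 8), 0.1)                           # sky background per pixel
stars = [((2, 3), 5.0), ((2, 3), 2.0), ((9, 9), 1.5)]  # ((row, col), flux)
for index, flux in stars:
    try:
        image[index] = np.maximum(image[index], flux)
    except IndexError:
        pass  # star falls outside the field of view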
Example #12
    def __init__(self,
                 band,
                 star_table=None,
                 sky_background=None,
                 photometry_file=None):
        """Instantiate Target class.

        Args:
            band (str):
                Name of the band. Used for extracting the band specific reference flux for magnitude 0.
            star_table (str, optional):
                Name of the file with the data of all stars.
            sky_background (Quantity, optional):
                Sky background. Int and float inputs will be interpreted as mag / arcsec**2.
            photometry_file (str, optional):
                Name of the file, from which the band specific reference flux is extracted.
        """

        # Input parameters
        if isinstance(band, str):
            try:
                self.band = eval(band)
            except NameError:
                self.band = band
        else:
            raise SpecklepyTypeError('Target', 'band', type(band), 'str')

        if star_table is None or isinstance(star_table, str):
            self.star_table = star_table
            # Read star table already here?
        else:
            raise SpecklepyTypeError('Target', 'star_table', type(star_table),
                                     'str')

        if photometry_file is None or isinstance(photometry_file, str):
            self.photometry_file = photometry_file
        else:
            raise SpecklepyTypeError('Target', 'photometry_file',
                                     type(photometry_file), 'str')
        self.band_reference_flux = self.get_reference_flux(
            self.photometry_file, self.band)

        if isinstance(sky_background, str):
            sky_background = eval(sky_background)
        if sky_background is None:
            self.sky_background_flux = 0.0 / Unit('arcsec')**2
        elif isinstance(sky_background, Quantity):
            # Interpreting as mag / arcsec**2
            logger.warning(
                "Interpreting sky_background in units of mag / arcsec**2."
            )
            self.sky_background_flux = self.magnitude_to_flux(
                sky_background.value) / Unit('arcsec')**2
        elif isinstance(sky_background, (int, float)):
            logger.warning(
                f"Interpreting scalar type sky_background as {sky_background * Unit('mag') / Unit('arcsec')**2}"
            )
            self.sky_background_flux = self.magnitude_to_flux(
                sky_background) / Unit('arcsec')**2
        else:
            raise SpecklepyTypeError('Target', 'sky_background',
                                     type(sky_background), 'Quantity')

        # Initialize class attributes
        self.shape = None
        self.field_of_view = None
        self.pixel_scale = None
        self.resolution = None
        self.flux_per_pixel = None
        self.stars = self.read_star_table(self.star_table)
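
The magnitude_to_flux helper is not shown in this snippet; a standard conversion, sketched here as an assumption about its behavior, scales the band reference flux by 10**(-0.4 * magnitude):

def magnitude_to_flux_sketch(magnitude, band_reference_flux=1.0):
    # Assumed behavior: flux relative to the magnitude-0 reference flux of the band
    return band_reference_flux * 10 ** (-0.4 * magnitude)

print(magnitude_to_flux_sketch(5.0))  # 0.01 times the reference flux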
Example #13
def holography(params, mode='same', debug=False):
    """Execute the holographic image reconstruction.

    The holographic image reconstruction is an algorithm as outlined, e.g., by Schoedel et al. (2013, Section 3). This
    function follows that algorithm, see comments in the code. Most of the important functions are imported from other
    modules of specklepy.

    Args:
        params (dict):
            Dictionary that carries all important parameters.
        mode (str, optional):
            Define the size of the output image as 'same' to the reference
            image or expanding to include the 'full' covered field. Default is
            'same'.
        debug (bool, optional):
            Set to True to inspect intermediate results.
            Default is False.

    Returns:
        image (np.ndarray): The image reconstruction.
    """

    logger.info(f"Starting holographic reconstruction...")
    file_archive = FileArchive(file_list=params['PATHS']['inDir'],
                               cards=[],
                               dtypes=[])
    in_files = file_archive.files
    in_dir = file_archive.in_dir
    tmp_dir = params['PATHS']['tmpDir']

    # Input check
    if mode not in ['same', 'full', 'valid']:
        raise SpecklepyValueError('holography()',
                                  argname='mode',
                                  argvalue=mode,
                                  expected="either 'same', 'full', or 'valid'")

    if 'apodizationType' in params['APODIZATION']:
        # Catch deprecated parameter name
        logger.warning(
            "Parameter 'apodizationType' is deprecated. Use 'type' instead!")
        params['APODIZATION']['type'] = params['APODIZATION'][
            'apodizationType']
    if 'apodizationWidth' in params['APODIZATION']:
        # Catch deprecated parameter name
        logger.warning(
            "Parameter 'apodizationWidth' is deprecated. Use 'radius' instead!"
        )
        params['APODIZATION']['radius'] = params['APODIZATION'][
            'apodizationWidth']
    if params['APODIZATION']['type'] is None or params['APODIZATION'][
            'type'].lower() not in ['gaussian', 'airy']:
        logger.error(
            f"Apodization type has not been set or is of the wrong type ({params['APODIZATION']['type']})"
        )
    if params['APODIZATION']['radius'] is None or not isinstance(
            params['APODIZATION']['radius'], (int, float)):
        logger.error(
            f"Apodization radius has not been set or is of the wrong type ({params['APODIZATION']['radius']})"
        )

    # Initialize the outfile
    out_file = ReconstructionFile(filename=params['PATHS']['outFile'],
                                  files=in_files,
                                  cards={"RECONSTRUCTION": "Holography"},
                                  in_dir=in_dir)

    # Initialize reconstruction
    reconstruction = Reconstruction(
        in_files=in_files,
        mode=mode,
        alignment_method='ssa',
        reference_image=params['PATHS']['alignmentReferenceFile'],
        in_dir=in_dir,
        tmp_dir=tmp_dir,
        out_file=params['PATHS']['outFile'],
        var_ext=params['OPTIONS']['varianceExtensionName'],
        box_indexes=params['OPTIONS']['box_indexes'],
        debug=debug)

    # (i-ii) Align cubes
    # shifts = get_shifts(files=in_files, reference_file=params['PATHS']['alignmentReferenceFile'],
    #                     lazy_mode=True, return_image_shape=False, in_dir=in_dir, debug=debug)
    shifts = reconstruction.shifts

    # (iii) Compute SSA reconstruction
    # image = ssa(in_files, mode=mode, outfile=out_file, in_dir=in_dir, tmp_dir=tmp_dir,
    #             variance_extension_name=params['OPTIONS']['varianceExtensionName'])
    image = reconstruction.coadd_long_exposures()
    if isinstance(image, tuple):
        # SSA returned a reconstruction image and a variance image
        image, image_var = image
    total_flux = np.sum(image)  # Stored for flux conservation

    # Start iteration from steps (iv) through (xi)
    while True:
        # (iv) Astrometry and photometry, i.e. StarFinder
        extract_sources(image=image,
                        fwhm=params['STARFINDER']['starfinderFwhm'],
                        noise_threshold=params['STARFINDER']['noiseThreshold'],
                        background_subtraction=True,
                        write_to=params['PATHS']['allStarsFile'],
                        star_finder='DAO',
                        debug=debug)

        # (v) Select reference stars
        print(
            "\tPlease copy your desired reference stars from the all stars file into the reference star file!"
        )
        input("\tWhen you are done, hit a ENTER.")

        # (vi) PSF extraction
        ref_stars = ReferenceStars(
            psf_radius=params['PSFEXTRACTION']['psfRadius'],
            reference_source_file=params['PATHS']['refSourceFile'],
            in_files=in_files,
            save_dir=tmp_dir,
            in_dir=in_dir,
            field_segmentation=params['PSFEXTRACTION']['fieldSegmentation'])
        if params['PSFEXTRACTION']['mode'].lower() == 'epsf':
            psf_files = ref_stars.extract_epsfs(file_shifts=shifts,
                                                debug=debug)
        elif params['PSFEXTRACTION']['mode'].lower() in [
                'mean', 'median', 'weighted_mean'
        ]:
            psf_files = ref_stars.extract_psfs(
                file_shifts=shifts,
                mode=params['PSFEXTRACTION']['mode'].lower(),
                debug=debug)
        else:
            raise RuntimeError(
                f"PSF extraction mode '{params['PSFEXTRACTION']['mode']}' is not understood!"
            )
        logger.info("Saved the extracted PSFs...")

        # (vii) Noise thresholding
        psf_noise_mask = None
        for file in psf_files:
            with fits.open(file, mode='update') as hdu_list:
                n_frames = hdu_list[0].header['NAXIS3']
                if psf_noise_mask is None:
                    psf_noise_mask = get_noise_mask(
                        hdu_list[0].data[0],
                        noise_reference_margin=params['PSFEXTRACTION']
                        ['noiseReferenceMargin'])
                for index in range(n_frames):
                    reference = np.ma.masked_array(hdu_list[0].data[index],
                                                   mask=psf_noise_mask)
                    background = np.mean(reference)
                    noise = np.std(reference)
                    update = np.maximum(
                        hdu_list[0].data[index] - background -
                        params['PSFEXTRACTION']['noiseThreshold'] * noise, 0.0)
                    if np.sum(update) == 0.0:
                        raise ValueError(
                            "After background subtraction and noise thresholding, no signal is leftover. "
                            "Please reduce the noiseThreshold!")
                    update = update / np.sum(update)  # Flux sum of order unity
                    hdu_list[0].data[index] = update
                    hdu_list.flush()

        # (viii) Subtraction of secondary sources within the reference apertures
        # TODO: Implement Secondary source subtraction
        pass

        # (ix) Estimate object, following Eq. 1 (Schoedel et al., 2013)
        f_object = FourierObject(in_files,
                                 psf_files,
                                 shifts=shifts,
                                 mode=mode,
                                 in_dir=in_dir)
        f_object.coadd_fft()

        # (x) Apodization
        f_object.apodize(type=params['APODIZATION']['type'],
                         radius=params['APODIZATION']['radius'])

        # (xi) Inverse Fourier transform to obtain the reconstructed image
        image = f_object.ifft(total_flux=total_flux)

        # Inspect the latest reconstruction
        if debug:
            imshow(image)

        # Save the latest reconstruction image to outfile
        out_file.data = image

        # Ask the user whether the iteration shall be continued or not
        answer = input(
            "\tDo you want to continue with one more iteration? [yes/no]\n\t")
        if answer.lower() in ['n', 'no']:
            break

    # Repeat astrometry and photometry, i.e. StarFinder on final image
    extract_sources(image=image,
                    fwhm=params['STARFINDER']['starfinderFwhm'],
                    noise_threshold=params['STARFINDER']['noiseThreshold'],
                    background_subtraction=True,
                    write_to=params['PATHS']['allStarsFile'],
                    star_finder='DAO',
                    debug=debug)

    # Finally return the image
    return image
Example #14
def main():

    # Parse args
    parser = GeneralArgParser()
    args = parser.parse_args()

    if args.debug:
        logger.setLevel('DEBUG')
        logger.debug(args)

    if args.gui:
        start()

    # Execute the script of the corresponding command
    if args.command == 'generate':

        # Read parameters from file and generate exposures
        target, telescope, detector, parameters = get_objects(args.parfile,
                                                              debug=args.debug)
        generate_exposure(target=target,
                          telescope=telescope,
                          detector=detector,
                          debug=args.debug,
                          **parameters)

    elif args.command == 'reduce':

        # In setup mode
        if args.setup:
            run.setup(path=args.path,
                      instrument=args.instrument,
                      par_file=args.parfile,
                      list_file=args.filelist,
                      sort_by=args.sortby)
        # Else start reduction following the parameter file
        else:
            params = config.read(args.parfile)
            run.full_reduction(params, debug=args.debug)

    elif args.command == 'ssa':

        # Prepare path information and execute reconstruction
        if args.tmpdir is not None and not os.path.isdir(args.tmpdir):
            os.mkdir(args.tmpdir)
        ssa(args.files,
            mode=args.mode,
            tmp_dir=args.tmpdir,
            outfile=args.outfile,
            box_indexes=args.box_indexes,
            debug=args.debug)

    elif args.command == 'holography':

        # Read parameters from file and execute reconstruction
        defaults_file = os.path.join(os.path.dirname(__file__),
                                     '../config/holography.cfg')
        defaults_file = os.path.abspath(defaults_file)
        params = config.read(defaults_file)
        params = config.update_from_file(params, args.parfile)
        holography(params,
                   mode=params['OPTIONS']['reconstructionMode'],
                   debug=args.debug)

    elif args.command == 'aperture':
        if args.mode == 'psf1d':
            logger.info("Extract 1D PSF profile")
            analysis.get_psf_1d(args.file,
                                args.index,
                                args.radius,
                                args.out_file,
                                args.normalize,
                                debug=args.debug)
        elif args.mode == 'variance':
            logger.info("Extract 1D PSF variation")
            analysis.get_psf_variation(args.file, args.index, args.radius,
                                       args.out_file, args.normalize,
                                       args.debug)
        else:
            logger.warning(f"Aperture mode {args.mode} not recognized!")

    elif args.command == 'extract':
        if args.out_file is None:
            args.out_file = 'sources_' + os.path.basename(
                args.file_name).replace('.fits', '.dat')
        extract_sources(image=args.file_name,
                        noise_threshold=args.noise_threshold,
                        fwhm=args.fwhm,
                        image_var=args.var,
                        write_to=args.out_file)

    elif args.command == 'plot':
        plot = Plot.from_file(file_name=args.file,
                              extension=args.extension,
                              columns=args.columns,
                              format=args.format,
                              layout=args.layout,
                              debug=args.debug)
        plot.apply_layout(layout=args.layout)
        plot.save()
        plot.show()

    elif args.command == 'apodization':
        get_resolution_parameters(wavelength=args.wavelength,
                                  diameter=args.diameter,
                                  pixel_scale=args.pixel_scale)
Example #15
    def identify_sequences(self, source='sky'):
        """Identify observation sequences.

        Args:
            source (str, optional):
                Observation type of the images that shall be used to measure the sky background from. Options are 'sky'
                (default) and 'science'.

        Returns:
            sequences (list of Sequence):
                List of observing sequences.
        """

        # Type check
        if isinstance(source, str):
            if source not in ['sky', 'science']:
                raise SpecklepyValueError('identify sequences',
                                          argname='source',
                                          argvalue=source,
                                          expected="'sky' or 'science'")
        else:
            raise SpecklepyTypeError('identify sequences',
                                     argname='source',
                                     argtype=type(source),
                                     expected='str')

        # Identify the observing sequences
        sequences = []
        for setup in self.setups:
            for object in self.objects:
                # Query names and time stamps of science and sky files
                sky_files = self.filter({
                    'OBSTYPE': source.upper(),
                    'OBJECT': object,
                    'SETUP': setup
                })
                sky_time_stamps = self.filter(
                    {
                        'OBSTYPE': source.upper(),
                        'OBJECT': object,
                        'SETUP': setup
                    },
                    namekey='DATE')
                science_files = self.filter({
                    'OBSTYPE': 'SCIENCE',
                    'OBJECT': object,
                    'SETUP': setup
                })
                science_time_stamps = self.filter(
                    {
                        'OBSTYPE': 'SCIENCE',
                        'OBJECT': object,
                        'SETUP': setup
                    },
                    namekey='DATE')

                # Test the number of source files
                if len(sky_files) == 0:
                    logger.warning(
                        f"Did not find any sky observations for object {object} in setup {setup}. No sky "
                        f"subtraction will be applied!")
                else:
                    # Store the information in a new sequence
                    sequences.append(
                        Sequence(sky_files=sky_files,
                                 science_files=science_files,
                                 file_path=self.in_dir,
                                 sky_time_stamps=sky_time_stamps,
                                 science_time_stamps=science_time_stamps,
                                 source=source,
                                 object=object,
                                 setup=setup))
        return sequences
Example #16
    def __init__(self,
                 shape,
                 pixel_scale,
                 optics_transmission=1,
                 quantum_efficiency=1,
                 system_gain=1,
                 readout_noise=None,
                 dark_current=None,
                 saturation_level=None):
        """Instantiate Detector class.

        Args:
            shape (tuple, dtype=int):
                Number of the pixels of the detector along two axes. Integer values will create a square detector array
                with the same number of pixels along both axes.
            pixel_scale (Quantity):
                Angular size of each pixel.
            optics_transmission (float, optional):
                Optical transmission coefficient, scaling between 0.0 and 1.0 (default).
            quantum_efficiency (float, Quantity, optional):
                Quantum efficiency of the detector. Scalar values will be interpreted in units of electrons per photon.
            system_gain (float, Quantity, optional):
                System gain of the detector. Scalar values will be interpreted in units of electrons per ADU.
            readout_noise (float, Quantity, optional):
                Readout noise of the detector. Scalar values will be interpreted in units of electrons.
            dark_current (float, Quantity, optional):
                Dark current of the detector. Scalar values will be interpreted in units of electrons per second.
            saturation_level (float, Quantity, optional):
                Saturation level of the detector. Scalar values will be interpreted in units of electrons.
        """

        # Input parameters
        if isinstance(shape, str):
            shape = eval(shape)
        if isinstance(shape, tuple):
            self.shape = shape
        elif isinstance(shape, int):
            self.shape = (shape, shape)
        else:
            raise SpecklepyTypeError('Detector', 'shape', type(shape), 'tuple')

        if isinstance(pixel_scale, Quantity):
            self.pixel_scale = pixel_scale
        elif isinstance(pixel_scale, (int, float)):
            logger.warning(
                f"Interpreting scalar type pixel_scale as {pixel_scale} arcsec")
            self.pixel_scale = pixel_scale * Unit('arcsec')
        elif isinstance(pixel_scale, str):
            self.pixel_scale = Quantity(pixel_scale)
        else:
            raise SpecklepyTypeError('Detector', 'pixel_scale',
                                     type(pixel_scale), 'Quantity')

        if isinstance(optics_transmission, (int, float)):
            self.optics_transmission = optics_transmission
        elif isinstance(optics_transmission, str):
            self.optics_transmission = float(optics_transmission)
        else:
            raise SpecklepyTypeError('Detector', 'optics_transmission',
                                     type(optics_transmission), 'float')

        if isinstance(quantum_efficiency, Quantity):
            self.quantum_efficiency = quantum_efficiency
        elif isinstance(quantum_efficiency, (int, float)):
            logger.warning(
                f"Interpreting scalar type quantum_efficiency as {quantum_efficiency} electron/ photon"
            )
            self.quantum_efficiency = quantum_efficiency * Unit(
                'electron / ph')
        elif isinstance(quantum_efficiency, str):
            self.quantum_efficiency = Quantity(quantum_efficiency)
        else:
            raise SpecklepyTypeError('Detector', 'quantum_efficiency',
                                     type(quantum_efficiency), 'Quantity')

        if isinstance(system_gain, Quantity):
            self.system_gain = system_gain
        elif isinstance(system_gain, (int, float)):
            logger.warning(
                f"Interpreting scalar type system_gain as {system_gain} electron/ ADU"
            )
            self.system_gain = system_gain * Unit('electron / adu')
        elif isinstance(system_gain, str):
            self.system_gain = Quantity(system_gain)
        else:
            raise SpecklepyTypeError('Detector', 'system_gain',
                                     type(system_gain), 'Quantity')

        if dark_current is None or isinstance(dark_current, Quantity):
            self.dark_current = dark_current
        elif isinstance(dark_current, (int, float)):
            logger.warning(
                f"Interpreting scalar type dark_current as {dark_current} electron/ s"
            )
            self.dark_current = dark_current * Unit('electron / s')
        elif isinstance(dark_current, str):
            self.dark_current = Quantity(dark_current)
        else:
            raise SpecklepyTypeError('Detector', 'dark_current',
                                     type(dark_current), 'Quantity')

        if readout_noise is None or isinstance(readout_noise, Quantity):
            self.readout_noise = readout_noise
        elif isinstance(readout_noise, (int, float)):
            logger.warning(
                f"Interpreting scalar type readout_noise as {readout_noise} electron"
            )
            self.readout_noise = readout_noise * Unit('electron')
        elif isinstance(readout_noise, str):
            self.readout_noise = Quantity(readout_noise)
        else:
            raise SpecklepyTypeError('Detector', 'readout_noise',
                                     type(readout_noise), 'Quantity')

        if isinstance(saturation_level, Quantity) or saturation_level is None:
            self.saturation_level = saturation_level
        elif isinstance(saturation_level, (int, float)):
            logger.warning(
                f"Interpreting scalar type saturation_level as {saturation_level} electron"
            )
            self.saturation_level = saturation_level * Unit('electron')
        elif isinstance(saturation_level, str):
            self.saturation_level = Quantity(saturation_level)
        else:
            raise SpecklepyTypeError('Detector', 'saturation_level',
                                     type(saturation_level), 'Quantity')

        # Derive secondary parameters
        self.array = np.zeros(self.shape)
        self.field_of_view = ScaledTuple(self.shape,
                                         scale=self.pixel_scale).scaled
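
ScaledTuple is a specklepy helper not shown here; its scaled result presumably pairs the pixel counts with the pixel scale. A plain sketch of the same field-of-view derivation:

from astropy.units import Quantity

shape = (1024, 1024)
pixel_scale = Quantity('0.01 arcsec')
field_of_view = tuple(n * pixel_scale for n in shape)
print(field_of_view)  # (10.24 arcsec, 10.24 arcsec)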
Example #17
File: run.py Project: deepin00/specklepy
def full_reduction(params, debug=False):
    """Execute a full reduction following the parameters in the `params` dictionary.

    TODO: Split this function into the parts and sort into the other modules

    Args:
        params (dict):
            Dictionary with all the settings for reduction.
        debug (bool, optional):
            Show debugging information.
    """

    # Set logging level
    if debug:
        logger.setLevel('DEBUG')

    # (0) Read file list table
    logger.info("Reading file list ...")
    in_files = FileArchive(file_list=params['PATHS']['fileList'],
                           in_dir=params['PATHS']['filePath'],
                           out_dir=params['PATHS']['outDir'])
    logger.info('\n' + str(in_files.table))

    # (1) Initialize directories and reduction files
    if not os.path.isdir(params['PATHS']['outDir']):
        os.makedirs(params['PATHS']['outDir'])
    if not os.path.isdir(params['PATHS']['tmpDir']):
        os.makedirs(params['PATHS']['tmpDir'])
    if 'skip' in params['PATHS'] and params['PATHS']['skip']:
        product_files = glob.glob(os.path.join(params['PATHS']['outDir'], '*fits'))
    else:
        product_files = in_files.initialize_product_files(prefix=params['PATHS']['prefix'])

    # (2) Flat fielding
    if 'skip' in params['FLAT'] and params['FLAT']['skip']:
        logger.info('Skipping flat fielding as requested from parameter file...')
    else:
        flat_files = in_files.get_flats()
        if len(flat_files) == 0:
            logger.warning("Did not find any flat field observations. No flat field correction will be applied!")
        else:
            logger.info("Starting flat field correction...")
            master_flat = flat.MasterFlat(flat_files, file_name=params['FLAT']['masterFlatFile'],
                                          file_path=params['PATHS']['filePath'], out_dir=params['PATHS']['tmpDir'])
            master_flat.combine()
            master_flat.run_correction(file_list=product_files, file_path=None)

    # (3) Linearization
    # TODO: Implement linearization

    # (4) Sky subtraction
    if 'skip' in params['SKY'] and params['SKY']['skip']:
        logger.info('Skipping sky background subtraction as requested from parameter file...')
    else:
        logger.info("Starting sky subtraction...")
        try:
            sky.subtract_sky_background(**params['SKY'], in_files=in_files, out_files=product_files,
                                        file_path=params['PATHS']['filePath'], tmp_dir=params['PATHS']['tmpDir'])
        except RuntimeError as e:
            raise RuntimeWarning(e)

    # Close reduction
    logger.info("Reduction finished...")