Example #1
	def model_psf(self, model, radius, psf_resolution, shape=256, **kwargs):
		"""Models the PSF given the desired model function and kwargs.

		Args:
			model (str):
				Must be either 'airydisk' or 'gaussian'.
			radius (int, float, astropy.units.Quantity):
				Radius of the PSF model, i.e. the radius of the first zero of an AiryDisk model or the standard
				deviation of the Gaussian model. Scalar values will be interpreted in units of arcseconds.
			psf_resolution (int, float, astropy.units.Quantity):
				Resolution of the model PSF, equivalent to the pixel scale of the array. Scalar values will be
				interpreted in units of arcseconds.
			shape (int, optional):
				Size of the model PSF along both axes.
			kwargs are forwarded to the model function.
		"""

		# Check input parameters
		if not isinstance(model, str):
			raise SpecklepyTypeError('model_psf', 'model', type(model), 'str')

		if isinstance(radius, Quantity):
			self.radius = radius
		elif isinstance(radius, (int, float)):
			logger.warning(f"Interpreting scalar type radius as {radius} arcsec")
			self.radius = Quantity(f"{radius} arcsec")
		elif isinstance(radius, str):
			self.radius = Quantity(radius)
		else:
			raise SpecklepyTypeError('model_psf', 'radius', type(radius), 'Quantity')

		if isinstance(psf_resolution, Quantity):
			self.psf_resolution = psf_resolution
		elif isinstance(psf_resolution, (int, float)):
			logger.warning(f"Interpreting scalar type psf_resolution as {psf_resolution} arcsec")
			self.psf_resolution = Quantity(f"{psf_resolution} arcsec")
		elif isinstance(psf_resolution, str):
			self.psf_resolution = Quantity(psf_resolution)
		else:
			raise SpecklepyTypeError('model_psf', 'psf_resolution', type(psf_resolution), 'Quantity')

		if isinstance(shape, int):
			center = (shape / 2, shape / 2)
			shape = (shape, shape)
		elif isinstance(shape, tuple):
			center = (shape[0] / 2, shape[1] / 2)
		else:
			raise SpecklepyTypeError('model_psf', 'shape', type(shape), 'int or tuple')

		if model.lower() == 'airydisk':
			model = models.AiryDisk2D(x_0=center[0], y_0=center[1], radius=float(self.radius / self.psf_resolution), **kwargs)
		elif model.lower() == 'gaussian':
			stddev = float(self.radius / self.psf_resolution)
			model = models.Gaussian2D(x_mean=center[0], y_mean=center[1], x_stddev=stddev, y_stddev=stddev, **kwargs)
		else:
			raise SpecklepyValueError('model_psf', 'model', model, "'airydisk' or 'gaussian'")

		y, x = np.mgrid[0:shape[0], 0:shape[1]]
		self.psf = model(x, y)
		self.psf = self.normalize(self.psf)
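For scale, a short sketch of how the angular radius translates into the pixel radius that is passed to the astropy model (the values are illustrative):

from astropy.units import Quantity

radius = Quantity('1.22 arcsec')          # e.g. first zero of an Airy pattern
psf_resolution = Quantity('0.01 arcsec')  # pixel scale of the model grid
print(float(radius / psf_resolution))     # -> 122.0, the radius in pixels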
Example #2
	def __init__(self, diameter, psf_source, central_obscuration=None, psf_frame=0, **kwargs):
		"""Instantiate Telescope class:

		Args:
			diameter (float or astropy.units.Quantity):
				Telescope diameter, used to compute the light collecting area.
			psf_source (str):
				File name to read PSFs from, or model name. Models can be either 'AiryDisk' or 'Gaussian'; these
				require the radius and psf_resolution arguments, which are forwarded via kwargs.
			central_obscuration (float, optional):
				Radial fraction of the telescope aperture that is blocked by the secondary mirror.
			psf_frame (int, optional):
				Index of the first frame to read from psf_source.
			kwargs:
				Are forwarded to the psf_source model.
		"""

		# Input parameters
		if isinstance(diameter, Quantity):
			self.diameter = diameter
		elif isinstance(diameter, (int, float)):
			logger.warning(f"Interpreting scalar type diameter as {diameter} m")
			self.diameter = Quantity(f"{diameter} m")
		elif isinstance(diameter, str):
			self.diameter = Quantity(diameter)
		else:
			raise SpecklepyTypeError('Telescope', 'diameter', type(diameter), 'Quantity')

		if isinstance(psf_source, str):
			self.psf_source = psf_source
		else:
			raise SpecklepyTypeError('Telescope', 'psf_source', type(psf_source), 'str')

		if isinstance(central_obscuration, float) or central_obscuration is None:
			self.central_obscuration = central_obscuration
		elif isinstance(central_obscuration, str):
			self.central_obscuration = float(central_obscuration)
		else:
			raise SpecklepyTypeError('Telescope', 'central_obscuration', type(central_obscuration), 'float')

		if isinstance(psf_frame, int):
			self.psf_frame = psf_frame
		else:
			raise SpecklepyTypeError('Telescope', 'psf_frame', type(psf_frame), 'int')

		# Derive secondary parameters
		if self.central_obscuration is not None:
			self.area = (1. - self.central_obscuration**2) * np.pi * (self.diameter / 2)**2
		else:
			self.area = np.pi * (self.diameter / 2)**2

		if psf_source.lower() in ['airydisk', 'gaussian']:
			self.model_psf(psf_source, **kwargs)
		else:
			psf, time_resolution, angular_resolution = self.read_psf_file(psf_source)
			self.psf = psf
			self.psf_resolution = angular_resolution
			if time_resolution is not None:
				self.timestep = time_resolution
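A minimal usage sketch, assuming the Telescope class is in scope (the values are illustrative):

telescope = Telescope(8.2, psf_source='Gaussian',
                      central_obscuration=0.14,
                      radius='0.1 arcsec', psf_resolution='0.02 arcsec')
print(telescope.area)  # collecting area, corrected for the central obscuration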
Example #3
def desaturate_color(color,
                     ncolors=1,
                     saturation_values=None,
                     saturation_min=0.1):
    """Desaturates a color and returns a list of desaturated colors.

    Args:
        color (str or tuple):
            Color to desaturate, given as a matplotlib color name or an RGB tuple.
        ncolors (int): Number of returned colors.
        saturation_values (None or list, dtype=float):
            Saturation values to use. If None, values are spaced linearly between the saturation of the input
            color and saturation_min.
        saturation_min (float): Minimum value of saturation.

    Returns:
        colors (list): List of RGB representations of colors with length ncolors.
    """

    # Input parameters
    if isinstance(color, str):
        rgb_color = clrs.to_rgb(color)
        hsv_color = clrs.rgb_to_hsv(rgb_color)
    elif isinstance(color, tuple):
        logger.info(f"Interpreting color tuple {color} as RGB values.")
        hsv_color = clrs.rgb_to_hsv(color)
    else:
        raise SpecklepyTypeError('desaturate_color()', 'color', type(color),
                                 'str or tuple')

    if not isinstance(ncolors, int):
        raise SpecklepyTypeError('desaturate_color()', 'ncolors',
                                 type(ncolors), 'int')

    if not isinstance(saturation_min, float):
        raise SpecklepyTypeError('desaturate_color()', 'saturation_min',
                                 type(saturation_min), 'float')

    if saturation_values is None:
        saturation_values = np.linspace(hsv_color[1],
                                        saturation_min,
                                        num=ncolors)
    elif isinstance(saturation_values, list):
        pass  # list is correct, nothing to adapt
    elif isinstance(saturation_values, float):
        saturation_values = [saturation_values]
    else:
        raise SpecklepyTypeError('desaturate_color()', 'saturation_values',
                                 type(saturation_values), 'list')

    # Create list of colors with varied saturation values
    colors = []
    for saturation_value in saturation_values:
        color = clrs.hsv_to_rgb((hsv_color[0], saturation_value, hsv_color[2]))
        colors.append(color)

    return colors
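A usage sketch, with the function above in scope (requires matplotlib):

colors = desaturate_color('tab:blue', ncolors=3)
print(len(colors))  # -> 3 RGB tuples with decreasing saturation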
Example #4
def subtract_scalar_background(files, params, prefix=None, debug=False):
    """Estimate and subtract a scalar background."""

    if not isinstance(files, (list, np.ndarray)):
        raise SpecklepyTypeError('subtract_scalar_background',
                                 argtype=type(files),
                                 argname='files',
                                 expected='list')
    else:
        if len(files) == 0:
            raise RuntimeError(
                "Sky subtraction received an empty list of files!")

    logger.info("Estimating scalar background and subtract...")
    for file_index, file in enumerate(files):

        image, header = fits.getdata(os.path.join(params.paths.filePath, file),
                                     header=True)

        # Update the header and initialize the outfile
        header.set('PIPELINE', 'SPECKLEPY')
        header.set('SKYCORR', str(datetime.now()))
        corrected_file = (prefix or '') + file
        corrected_file = os.path.join(params.paths.filePath, corrected_file)
        outfile = Outfile(filename=corrected_file,
                          header=header,
                          shape=image.shape)

        # Estimate scalar background and uncertainties, and subtract
        if image.ndim == 2:
            mean, median, std = sigma_clipped_stats(
                image, sigma=params.sky.backgroundSigmaClip)
            outfile.data = image - mean
            image_var = np.ones(image.shape) * np.square(std)
            outfile.new_extension(name='VAR', data=image_var)
        elif image.ndim == 3:
            means, medians, stds = sigma_clipped_stats(
                image, sigma=params.sky.backgroundSigmaClip, axis=(1, 2))
            logger.info(
                f"Sigma clipped stats:\t{np.mean(means):.2f} +- {np.mean(stds):.2f}"
            )
            outfile.new_extension(name='VAR', data=np.zeros(image.shape))
            tmp_frame = np.ones(image[0].shape)

            for frame_index, frame in enumerate(image):
                print(f"\r\tUpdating frame {frame_index+1:3}...", end='')
                outfile.update_frame(frame_index=frame_index,
                                     data=np.subtract(frame,
                                                      means[frame_index]))
                outfile.update_frame(frame_index=frame_index,
                                     data=tmp_frame *
                                     np.square(stds[frame_index]),
                                     extension='VAR')
            print()
        else:
            raise RuntimeError(
                f"Images are supposed to have 2 or 3 dimensions but this one has {image.ndim}!"
            )

    logger.info("Scalar background subtraction complete!")
Example #5
    def filter(self, filter_dict, namekey='FILE'):
        """Filter the archive's table by the column properties.

        filter_dict = {'name_of_column': [desired_value_1, desired_value_2]}

        Args:
            filter_dict:
                Dictionary that holds the table column names as keys and acceptable values as lists as the
                corresponding values.
            namekey (str, optional):
                Name/key of the output column that shall be filtered by `filter_dict`.

        Returns:
            filtered (np.array):
                Array-like subset of the column `namekey` that matches the criteria based on filter_dict.
        """

        if not isinstance(filter_dict, dict):
            raise SpecklepyTypeError('FileArchive.filter', 'filter_dict',
                                     type(filter_dict), 'dict')

        mask = np.ones(len(self.table[namekey]), dtype=bool)
        for key in filter_dict.keys():
            if isinstance(filter_dict[key], list):
                submask = np.zeros(len(self.table[namekey]), dtype=bool)
                for correct in filter_dict[key]:
                    submask |= (self.table[key] == correct)
                mask &= submask
            else:
                mask &= (self.table[key] == filter_dict[key])

        return self.table[namekey][mask].data
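The masking logic reduces to boolean numpy operations on astropy Table columns, as in this self-contained sketch:

import numpy as np
from astropy.table import Table

table = Table({'FILE': ['a.fits', 'b.fits', 'c.fits'],
               'OBSTYPE': ['SCIENCE', 'SKY', 'SCIENCE']})
mask = np.ones(len(table), dtype=bool)
mask &= (table['OBSTYPE'] == 'SCIENCE')
print(table['FILE'][mask].data)  # -> ['a.fits' 'c.fits']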
Example #6
def imshow(image,
           title=None,
           norm=None,
           colorbar_label=None,
           saveto=None,
           maximize=False):
    """Shows a 2D image.

    Args:
        image (np.ndarray, ndim=2):
            Image to be plotted.
        title (str, optional):
            Plot title. Default is None.
        norm (str, optional):
            Can be set to 'log' for plotting in logarithmic scale. Default is
            None.
        colorbar_label (str, optional):
            Label of the color bar. Default is None.
        saveto (str, optional):
            Path to save the plot to. Default is None.
        maximize (bool, optional):
            Set true for showing the plot on full screen. Default is False.
    """

    if isinstance(image, np.ndarray):
        if image.ndim != 2:
            raise SpecklepyValueError('imshow()', 'image.ndim', image.ndim,
                                      '2')
        if isinstance(image, u.Quantity):
            unit = image.unit
            colorbar_label = "({})".format(unit)
            image = image.value
    else:
        raise SpecklepyTypeError('imshow()', 'image', type(image),
                                 'np.ndarray')

    if norm == 'log':
        norm = clrs.LogNorm()
    plt.figure()
    plt.imshow(image, norm=norm, origin='lower')
    plt.title(title)
    if maximize:
        maximize_plot()

    # Colorbar
    cbar = plt.colorbar(pad=0.0)
    if colorbar_label is not None:
        cbar.set_label(colorbar_label)

    if saveto is not None:
        plt.savefig(saveto, dpi=300)

    plt.show()
    plt.close()
Example #7
def time_difference(t0, times):
    """Compute time difference(s) from one time to others.

    Args:
        t0 (str, datetime):
            Time stamp whose difference to times is computed.
        times (str, datetime, list):
            Times to which the time difference is computed.

    Returns:
        timedeltas (float, np.array):
            Time difference(s) from t0 to times (in seconds).
    """

    # Type check
    if isinstance(t0, str):
        t0 = parser.parse(t0)
    elif isinstance(t0, datetime):
        pass
    else:
        raise SpecklepyTypeError('time_difference', 't0', type(t0),
                                 'str or datetime')

    if isinstance(times, str):
        times = parser.parse(times)
    elif isinstance(times, (datetime, list, np.ndarray)):
        pass
    else:
        raise SpecklepyTypeError('time_difference', 'times', type(times),
                                 'str, datetime, or list')

    # Compute time deltas in units of seconds
    if isinstance(times, (list, np.ndarray)):
        timedeltas = np.empty((len(times)))
        for i, o in enumerate(times):
            if isinstance(o, str):
                o = parser.parse(o)
            timedeltas[i] = (o - t0).total_seconds()
        return timedeltas
    else:
        return (times - t0).total_seconds()
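A usage sketch, with the function above in scope:

deltas = time_difference('2020-01-01T00:00:00',
                         ['2020-01-01T00:01:00', '2020-01-01T01:00:00'])
print(deltas)  # -> [  60. 3600.]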
Example #8
    def __init__(self, file_list, file_name='MasterFlat.fits', file_path=None, out_dir=None):
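        """Create a MasterFlat instance.

        Args:
            file_list (list, np.ndarray, or astropy.table.Table):
                List of flat-field file names, or a table with 'OBSTYPE' and 'FILE' columns to select the flat
                fields from.
            file_name (str, optional):
                Name of the master flat file to be created.
            file_path (str, optional):
                Path to the input files.
            out_dir (str, optional):
                Directory to store the output file in.
        """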

        # Store input parameters
        if isinstance(file_list, (list, np.ndarray)):
            self.files = file_list
        elif isinstance(file_list, Table):
            is_flat_file = file_list['OBSTYPE'] == 'FLAT'
            self.files = file_list['FILE'][is_flat_file]
        else:
            raise SpecklepyTypeError('MasterFlat', 'file_list', type(file_list), 'list or astropy.table.Table')

        if isinstance(file_name, str):
            self.file_name = file_name
        else:
            raise SpecklepyTypeError('MasterFlat', 'file_name', type(file_name), 'str')

        if isinstance(file_path, str) or file_path is None:
            self.file_path = file_path
        else:
            raise SpecklepyTypeError('MasterFlat', 'file_path', type(file_path), 'str')

        # Create an output file
        self.master_file = MasterFile(self.file_name, files=self.files, in_dir=file_path, out_dir=out_dir)
Example #9
File: box.py (project: deepin00/specklepy)
    def __init__(self, indexes=None):
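        """Create a Box instance from a list of indexes.

        Args:
            indexes (list, optional):
                Box limits, given as one [min, max] pair for both axes, as per-axis pairs
                [[x_min, x_max], [y_min, y_max]], or flat as [x_min, x_max, y_min, y_max].
        """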

        # Initialize attributes with kwargs
        self.x_min = None
        self.x_max = None
        self.y_min = None
        self.y_max = None

        # Interpret indexes input and overwrite
        if indexes is not None:
            if not isinstance(indexes, list):
                raise SpecklepyTypeError('Box',
                                         argname='indexes',
                                         argtype=type(indexes),
                                         expected='list')
            else:
                if len(indexes) == 1:
                    if len(indexes[0]) == 1:
                        raise SpecklepyValueError('Box',
                                                  argname='indexes',
                                                  argvalue=indexes,
                                                  expected='two limits per axis')
                    else:
                        logger.debug("Using the same limits for both axes")
                        self.x_min = indexes[0][0]
                        self.x_max = indexes[0][1]
                        self.y_min = indexes[0][0]
                        self.y_max = indexes[0][1]
                elif len(indexes) == 2:
                    if isinstance(indexes[0], int) and isinstance(
                            indexes[1], int):
                        logger.debug("Using the same limits for both axes")
                        self.x_min = indexes[0]
                        self.x_max = indexes[1]
                        self.y_min = indexes[0]
                        self.y_max = indexes[1]
                    elif isinstance(indexes[0], list) and isinstance(
                            indexes[1], list):
                        try:
                            self.x_min = indexes[0][0]
                            self.x_max = indexes[0][1]
                        except IndexError:
                            pass
                        try:
                            self.y_min = indexes[1][0]
                            self.y_max = indexes[1][1]
                        except IndexError:
                            pass
                elif len(indexes) == 4:
                    self.x_min = indexes[0]
                    self.x_max = indexes[1]
                    self.y_min = indexes[2]
                    self.y_max = indexes[3]
                else:
                    raise SpecklepyValueError('Box',
                                              argname='indexes',
                                              argvalue=indexes,
                                              expected='list of length 1, 2, or 4')
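The accepted index formats, for illustration (with the Box class in scope):

Box([16, 48])             # two ints: same limits for both axes
Box([[16, 48], [0, 32]])  # per-axis limit pairs
Box([16, 48, 0, 32])      # flat x_min, x_max, y_min, y_max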
Example #10
    def identify_reference_file(self):

        # Initialize reference file
        reference_file = None

        # Interpret reference image
        if isinstance(self.reference_image, str):
            reference_file = self.reference_image
        elif isinstance(self.reference_image, int):
            reference_file = self.in_files[self.reference_image]
        else:
            raise SpecklepyTypeError('Reconstruction', 'reference_image', type(self.reference_image), 'int or str')

        return reference_file
Example #11
    def set_width(self, val):
        if isinstance(val, str):
            if val == 'text':
                val = 10
            elif val == 'column':
                val = 5
            else:
                raise SpecklepyValueError('Plot.set_width',
                                          argname='val',
                                          argvalue=val,
                                          expected="'text' or 'column'")
        elif not isinstance(val, (int, float)):
            raise SpecklepyTypeError('Plot.set_width',
                                     argname='val',
                                     argtype=type(val),
                                     expected='str or float')
        self.figure.set_figwidth(val=val)
Example #12
    def identify_setups(self, keywords):
        """Identify distinct observational setups in a list of files.

        This function iterates through the given header keywords and labels each unique combination of values as
        a distinct setup.

        Args:
            keywords (list of str):
                Header keywords by which the setups are distinguished.
        """

        # Check input parameters
        if not isinstance(keywords, list):
            raise SpecklepyTypeError('identify_setups', 'keywords',
                                     type(keywords), 'list')

        # Identifying setups key-by-key
        logger.info(
            "Identifying distinct observational setups in the file list...")
        self.table.add_column(
            col=Column(data=[None] * len(self.table), name='SETUP'))

        # Iterate over keywords and identify unique settings per key
        for key in keywords:
            try:
                unique = np.unique(self.table[key].data)
            except KeyError:
                logger.info(
                    f"Key {key} is not available in the file table and will be ignored!"
                )
                continue
            logger.info(
                f"Identified {len(unique)} setups by keyword {key}:\t{unique}")

            for index, setup in enumerate(unique):
                for row in self.table:
                    if row[key] == setup:
                        if row['SETUP'] is None:
                            row['SETUP'] = str(index)
                        else:
                            row['SETUP'] += str(index)

        # Overwrite setup keys by length-1 string
        combinations = np.unique(self.table['SETUP'].data)
        for index, combination in enumerate(combinations):
            row_indexes = np.where(self.table['SETUP'].data == combination)
            self.table['SETUP'][row_indexes] = string.ascii_uppercase[index]
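The final relabelling maps each unique combination string to a single uppercase letter; in isolation:

import string
import numpy as np

combos = np.array(['00', '01', '01', '00'])
labels = np.empty_like(combos)
for index, combination in enumerate(np.unique(combos)):
    labels[np.where(combos == combination)] = string.ascii_uppercase[index]
print(labels)  # -> ['A' 'B' 'B' 'A']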
Example #13
    def __init__(self, options=None):
        """Create a Section instance from an options dict.

        Args:
            options (configparser.SectionProxy, optional):
                All section options to be stored.
        """

        if not isinstance(options, (dict, configparser.SectionProxy)):
            raise SpecklepyTypeError('Section',
                                     argname='options',
                                     argtype=type(options),
                                     expected='dict or configparser.SectionProxy')

        for key in options.keys():
            value = options[key]
            try:
                # Evaluate strings into Python types where possible
                setattr(self, key, eval(value))
            except Exception:
                setattr(self, key, value)
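Usage sketch: string values are evaluated into Python types where possible, everything else is stored verbatim:

section = Section({'gain': '1.5', 'mode': 'fast'})
print(section.gain + 1)  # -> 2.5 (evaluated to float)
print(section.mode)      # -> 'fast' (kept as a string)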
Example #14
	def integrate_psf(self, integration_time, hdu=None):
		"""Integrates psf frames over the input time.

		Args:
			integration_time (Quantity):
				This is used for computing the number of frames 'n_frames', via floor division by the 'timestep'
				attribute.
			hdu (int, str, optional):
				Specifier of the HDU to read the data from. Default is None, i.e. the first HDU.
		"""

		# Check input parameters
		if isinstance(integration_time, (int, float)):
			logger.warning(f"Interpreting scalar type integration_time as {integration_time} s")
			integration_time = integration_time * Unit('s')
		elif not isinstance(integration_time, Quantity):
			raise SpecklepyTypeError('integrate_psf', 'integration_time', type(integration_time), 'Quantity')

		if integration_time < self.timestep:
			raise ValueError(f"integrate_psf received integration time {integration_time} shorter than the time "
							 f"resolution of the psf source ({self.timestep})!")

		n_frames = int(integration_time / self.timestep)

		# Read PSF frames from source file
		data = fits.getdata(self.psf_source, hdu)

		self.psf_frame += 1
		if self.psf_frame + n_frames < data.shape[0]:
			self.psf = np.sum(data[self.psf_frame : self.psf_frame+n_frames], axis=0)
		else:
			self.psf = np.sum(data[self.psf_frame : ], axis=0)
			self.psf += np.sum(data[ : (self.psf_frame+n_frames) % data.shape[0]], axis=0)
		self.psf_frame += n_frames - 1
		self.psf_frame = self.psf_frame % data.shape[0]

		# Normalize the integrated PSF
		self.psf = self.normalize(self.psf)
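The frame bookkeeping wraps around the end of the PSF cube; the index arithmetic in isolation:

import numpy as np

n_total, start, n_frames = 10, 8, 5
indices = (start + np.arange(n_frames)) % n_total
print(indices)  # -> [8 9 0 1 2], wrapping around the cube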
Example #15
def encircled_energy_plot(files, normalize=None, maximize=False):
    """Plots the encircled energy data from a file.

    Args:
        files (str or list):
            Name of the file(s) to extract the data from.
        normalize (str, optional):
            Normalization mode, can be None, 'peak', or 'last'. Default is None.
        maximize (bool, optional):
            Show plots on full screen. Default is False.
    """

    # Input parameters
    if isinstance(files, str):
        plt.title(files)
        files = [files]
    if not isinstance(files, list):
        raise SpecklepyTypeError('encircled_energy_plot()', 'files',
                                 type(files), 'list')

    for file in files:
        xdata, ydata = np.loadtxt(file).transpose()

        plt.xlabel("Radius (pix)")
        if normalize == 'peak':
            plt.ylabel("Peak normalized flux")
            ydata /= ydata[0]
        elif normalize == 'last':
            plt.ylabel("Sky normalized flux")
            ydata /= ydata[-1]
        else:
            plt.ylabel("Flux")

        plt.plot(xdata, ydata)
    if maximize:
        maximize_plot()
    plt.show()
    plt.close()
Example #16
	def normalize(array, mode='sum_circular'):
		"""Normalizes the input array depending on the mode.

		Args:
			array (np.ndarray):
				Array to be normalized.
			mode (str, optional):
				Can be either 'sum' for having a sum of 1, 'max' (or 'peak') for having a peak value of 1, or
				'sum_circular' for subtracting a constant and then normalizing to a sum of 1. Default is 'sum_circular'.

		Returns:
			normalized (np.ndarray):
				Normalized array, according to mode.
		"""

		if not isinstance(array, np.ndarray):
			raise SpecklepyTypeError('normalize', 'array', type(array), 'np.ndarray')
		if np.sum(array) == 0:
			raise ValueError("Normalize received an array of zeros!")

		if mode not in ['sum', 'max', 'peak', 'sum_circular']:
			raise SpecklepyValueError('normalize', 'mode', mode, "'sum', 'max'/'peak', or 'sum_circular'")

		if mode == 'sum':
			normalized = array / np.sum(array)
		elif mode in ('max', 'peak'):
			normalized = array / np.max(array)
		else:  # mode == 'sum_circular'
			x, y = array.shape
			low_cut = array[0, int(y/2)]
			array = np.maximum(array - low_cut, 0)
			normalized = array / np.sum(array)

		return normalized
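Usage sketch (the method takes no self argument, so it behaves like a static method):

import numpy as np

psf = np.random.rand(16, 16)
print(np.sum(normalize(psf, mode='sum')))  # -> 1.0
print(np.max(normalize(psf, mode='max')))  # -> 1.0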
Example #17
def get_pad_vectors(shifts,
                    cube_mode=False,
                    return_reference_image_pad_vector=False):
    """Computes padding vectors from the relative shifts between files.

    Args:
        shifts (list or np.ndarray):
            Shifts between files, relative to a reference image. See get_shifts function for details.
        cube_mode (bool, optional):
            If image is a cube, the estimated pad vectors will obtain pad_vector entries of (0, 0) for the zeroth axis.
            Default is False.
        return_reference_image_pad_vector (bool, optional):
            In 'same' mode, pad_array needs also the pad vector of the reference image. This is returned if this arg is
            set True. Default is False.

    Returns:
        pad_vectors (list):
            List of padding vectors for each shift in shifts.
        reference_image_pad_vector (list, optional):
            Pad vector of the reference image, which is needed for pad_array()
            in 'same' mode, thus is only returned in 'same' mode.
    """

    # Check input parameters
    if not isinstance(shifts, (list, np.ndarray)):
        raise SpecklepyTypeError('get_pad_vectors()',
                                 argname='shifts',
                                 argtype=type(shifts),
                                 expected='list')
    if not isinstance(cube_mode, bool):
        raise SpecklepyTypeError('get_pad_vectors()',
                                 argname='cube_mode',
                                 argtype=type(cube_mode),
                                 expected='bool')
    if not isinstance(return_reference_image_pad_vector, bool):
        raise SpecklepyTypeError(
            'get_pad_vectors()',
            argname='return_reference_image_pad_vector',
            argtype=type(return_reference_image_pad_vector),
            expected='bool')

    # Initialize list
    pad_vectors = []

    # Get extreme points for 'full' padding
    xmax, ymax = np.max(np.array(shifts), axis=0)
    xmin, ymin = np.min(np.array(shifts), axis=0)

    # Iterate over Shifts
    for shift in shifts:

        if cube_mode:
            pad_vector = [(0, 0)]
        else:
            pad_vector = []

        pad_vector.append((shift[0] - xmin, xmax - shift[0]))
        pad_vector.append((shift[1] - ymin, ymax - shift[1]))

        pad_vectors.append(pad_vector)

    # In 'same' mode, pad_array needs also the pad vector of the reference image
    if return_reference_image_pad_vector:
        reference_image_pad_vector = [(np.abs(xmin), np.abs(xmax)),
                                      (np.abs(ymin), np.abs(ymax))]
        return pad_vectors, reference_image_pad_vector
    else:
        return pad_vectors
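Usage sketch with two shifts, relative to a reference at (0, 0):

pad_vectors, ref_pad = get_pad_vectors([(0, 0), (2, -1)],
                                       return_reference_image_pad_vector=True)
print(pad_vectors)  # per-file (before, after) padding for each axis
print(ref_pad)      # padding of the reference image for 'same' mode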
Example #18
	def get_photon_rate(self, photon_rate_density, photon_rate_density_resolution=None, integration_time=None,
						debug=False):
		"""Propagates the 'photon_rate_density' array through the telescope.

		The photon_rate_density is multiplied by the telescope collecting area and then
		convolved with the PSF. If the resolution of the flux array differs from the
		telescope's psf_resolution, one of the two is resampled. If the PSF is
		non-static, it is integrated over the 'integration_time' value.

		Args:
			photon_rate_density (np.ndarray, dtype=Quantity):
				Photon rate density array that is propagated through the telescope.
			photon_rate_density_resolution (Quantity, optional):
				Resolution (pixel scale) of the photon_rate_density array.
			integration_time (Quantity, optional):
				Required only if the PSF is non-static.
				Required only if the PSF is non-static.
			debug (bool, optional):
				Show additional information for debugging. Default is False.

		Returns:
			photon_rate (Quantity): PSF-convolved photon rate array.
		"""

		# Input parameters
		if not isinstance(photon_rate_density, Quantity):
			raise SpecklepyTypeError('get_photon_rate', 'photon_rate_density', type(photon_rate_density), 'Quantity')

		if photon_rate_density_resolution is not None:
			if not isinstance(photon_rate_density_resolution, Quantity):
				raise SpecklepyTypeError('get_photon_rate', 'photon_rate_density_resolution',
										 type(photon_rate_density_resolution), 'Quantity')
			psf_resample_mode = True
		else:
			psf_resample_mode = False

		if integration_time is None:
			if hasattr(self, 'timestep'):
				raise ValueError("If the PSF source of Telescope is non-static, the call function requires the "
								 "integration_time.")
		elif isinstance(integration_time, (int, float)):
			logger.warning(f"Interpreting scalar type integration_time as {integration_time} s")
			integration_time = Quantity(f"{integration_time} s")
		elif not isinstance(integration_time, Quantity):
			raise SpecklepyTypeError('get_photon_rate', 'integration_time', type(integration_time), 'Quantity')

		# Apply telescope collecting area
		photon_rate = photon_rate_density * self.area
		total_flux = np.sum(photon_rate)
		photon_rate_unit = photon_rate.unit

		# Prepare PSF if non-static
		if hasattr(self, 'timestep'):
			self.integrate_psf(integration_time=integration_time)

		# Resample photon_rate_density to psf resolution
		if psf_resample_mode:

			# Compute the zoom factor
			ratio = float(photon_rate_density_resolution / self.psf_resolution)

			# Zoom either the image or the PSF, depending on the zoom factor
			with warnings.catch_warnings():
				warnings.simplefilter('ignore')
				if ratio < 1.0:
					self.psf = zoom(self.psf, 1/ratio, order=1) / ratio**2
					self.psf = self.normalize(self.psf)
				else:
					memory_sum = np.sum(photon_rate)
					photon_rate = zoom(photon_rate, ratio, order=1) / ratio**2
					photon_rate = photon_rate / np.sum(photon_rate) * memory_sum

		# Convolve the array with the PSF
		convolved = fftconvolve(photon_rate, self.psf, mode='same') * photon_rate_unit

		# Report on flux conservation
		logger.debug(f"Flux conservation during convolution: {total_flux} > {np.sum(convolved)}")

		return convolved.decompose()
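The flux-conserving resampling step in isolation (scipy's zoom, as used above):

import numpy as np
from scipy.ndimage import zoom

image = np.random.rand(32, 32)
ratio = 2.0
resampled = zoom(image, ratio, order=1) / ratio**2  # conserve the total flux
print(np.sum(image), np.sum(resampled))  # approximately equal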
Example #19
def extract_sources(image,
                    noise_threshold,
                    fwhm,
                    star_finder='DAO',
                    image_var=None,
                    background_subtraction=True,
                    write_to=None,
                    debug=False):
    """Extract sources from an image with a StarFinder routine.

    Long description...

    Args:
        image (np.ndarray or str):
            Image array or the name of a file containing the image array.
        noise_threshold (float):
            Multiple of the uncertainty/ standard deviation of the image.
        fwhm (float):
            Expected full width at half maximum (FWHM) of the sources in units of pixels.
        star_finder (str, optional):
            Choose whether the 'DAO' or 'IRAF' StarFinder implementations from photutils shall be used. Default is
            'DAO'.
        image_var (float or str):
            Variance of the image used for the StarFinder threshold (=noise_threshold * sqrt(image_var)). If not
            provided, the code extracts this value from sigma clipped stats. If provided as str-type, the code tries to
            use this as a key to the FITS file HDU list.
        background_subtraction (bool, optional):
            Let the StarFinder consider the background subtraction. Set False for ignoring background flux. Default is
            `True`.
        write_to (str, optional):
            If provided as a str, the list of identified sources is saved to this file.
        debug (bool, optional):
            Show debugging information. Default is `False`.

    Returns:
        sources (astropy.table.Table): Table of identified sources, None if no
            sources are detected.
    """

    # Set logger level
    if debug:
        logger.setLevel('DEBUG')

    # Input parameters
    if isinstance(image, np.ndarray):
        filename = 'current cube'
    elif isinstance(image, str):
        logger.info(
            "The argument image '{}' is interpreted as file name.".format(
                image))
        filename = image
        image = fits.getdata(filename)
        image = image.squeeze()
    else:
        raise SpecklepyTypeError('extract_sources()',
                                 argname='image',
                                 argtype=type(image),
                                 expected='np.ndarray or str')

    # Prepare noise statistics
    mean, median, std = sigma_clipped_stats(image, sigma=3.0)
    logger.info(
        f"Noise statistics for {filename}:\n\tMean = {mean:.3}\n\tMedian = {median:.3}\n\tStdDev = {std:.3}"
    )

    # Set detection threshold
    if image_var is None:
        threshold = noise_threshold * std
    else:
        if isinstance(image_var, str):
            # Try to load variance extension from file
            image_var = fits.getdata(filename, image_var)
            image_var = np.mean(image_var)
        threshold = noise_threshold * np.sqrt(image_var)

    # Set sky background
    if background_subtraction:
        logger.info(f"Considering mean sky background of {mean}")
        sky = mean
    else:
        sky = 0.0

    # Instantiate StarFinder object
    if not isinstance(star_finder, str):
        raise SpecklepyTypeError('extract_sources',
                                 argname='star_finder',
                                 argtype=type(star_finder),
                                 expected='str')
    if 'dao' in star_finder.lower():
        star_finder = DAOStarFinder(fwhm=fwhm, threshold=threshold, sky=sky)
    elif 'iraf' in star_finder.lower():
        star_finder = IRAFStarFinder(fwhm=fwhm, threshold=threshold, sky=sky)
    else:
        raise SpecklepyValueError('extract_sources',
                                  argname='star_finder',
                                  argvalue=star_finder,
                                  expected="'DAO' or 'IRAF'")

    # Find stars
    logger.info("Extracting sources...")
    sources = star_finder(image)

    # Reformat the sources table; None is returned if no sources were detected
    if sources is None:
        logger.info("No sources detected")
        return None
    sources.sort('flux', reverse=True)
    sources.rename_column('xcentroid', 'x')
    sources.rename_column('ycentroid', 'y')
    sources.keep_columns(['x', 'y', 'flux'])

    # Add terminal output
    logger.info(f"Extracted {len(sources)} sources")
    logger.debug(sources)

    # Save sources table to file, if requested
    if write_to is not None:
        logger.info("Writing list of sources to file {}".format(write_to))
        sources.write(write_to, format='ascii.fixed_width', overwrite=True)

    return sources
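A usage sketch (the file name is hypothetical):

sources = extract_sources('image_ssa.fits',  # hypothetical FITS file
                          noise_threshold=5.0,
                          fwhm=4.0,
                          star_finder='DAO',
                          write_to='sources.dat')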
Example #20
    def __init__(self,
                 in_file,
                 out_dir,
                 frame_shape,
                 in_dir=None,
                 cards=None,
                 header_card_prefix=None):
        """Create a PSFFile instance.

        Args:
            in_file (str):
                Name of the parent file.
            out_dir (str):
                Name of the directory that the file will be stored in.
            frame_shape (tuple):
                Shape of the PSF frames, which is the box size.
            in_dir (str, optional):
                Path to the input file.
            cards (dict, optional):
                Dictionary of header cards.
            header_card_prefix (str, optional):
                Prefix for the header card keywords.
        """

        # Create PSF directory, if not existing yet
        if not os.path.exists(out_dir):
            logger.info(f"Creating PSF directory {out_dir}")
            os.makedirs(out_dir)

        # Adapt the name of the parent file to form the name of the out_file
        out_file = 'psf_' + os.path.basename(in_file)

        # Type assertion
        if not isinstance(frame_shape, tuple):
            raise SpecklepyTypeError('PSFFile', 'frame_shape',
                                     type(frame_shape), 'tuple')

        if cards is None:
            cards = {}
        elif not isinstance(cards, dict):
            raise SpecklepyTypeError('PSFFile', 'cards', type(cards), 'dict')

        if header_card_prefix is None:
            header_card_prefix = ""
        elif not isinstance(header_card_prefix, str):
            raise SpecklepyTypeError('PSFFile', 'header_card_prefix',
                                     type(header_card_prefix), 'str')

        # Add name of parent file to header
        cards["FILE NAME"] = os.path.basename(in_file)

        # Derive data shape
        if in_dir is not None:
            hdr_input = fits.getheader(os.path.join(in_dir, in_file))
        else:
            hdr_input = fits.getheader(in_file)
        shape = (hdr_input['NAXIS3'], frame_shape[0], frame_shape[1])

        super().__init__(filename=out_file,
                         path=out_dir,
                         shape=shape,
                         cards=cards,
                         header_card_prefix=header_card_prefix)
Example #21
File: ssa.py (project: deepin00/specklepy)
def coadd_frames(cube, var_cube=None, box=None):
    """Compute the simple shift-and-add (SSA) reconstruction of a data cube.

    This function uses the SSA algorithm to coadd frames of a cube. If provided, this function coadds the variances
    within a var cube considering the exact same shifts.

    Args:
        cube (np.ndarray, ndim=3):
            Data cube which is integrated along the zero-th axis.
        var_cube (np.ndarray, ndim=3, optional):
            Data cube of variances which is integrated along the zero-th axis with the same shifts as the cube.
        box (Box object, optional):
            Constraining the search for the intensity peak to the specified box. Searching the full frames if not
            provided.

    Returns:
        coadded (np.ndarray, ndim=2):
            SSA-integrated frames of the input cube.
        var_coadded (np.ndarray, ndim=2):
            SSA-integrated variances of the input cube or the variance map itself if provided as a 2D cube.
    """

    if not isinstance(cube, np.ndarray):
        raise SpecklepyTypeError('coadd_frames()',
                                 argname='cube',
                                 argtype=type(cube),
                                 expected='np.ndarray')
    if cube.ndim != 3:
        raise SpecklepyValueError('coadd_frames()',
                                  argname='cube.ndim',
                                  argvalue=cube.ndim,
                                  expected='3')

    if var_cube is not None:
        if not isinstance(var_cube, np.ndarray):
            raise SpecklepyTypeError('coadd_frames()',
                                     argname='var_cube',
                                     argtype=type(var_cube),
                                     expected='np.ndarray')
        if var_cube.ndim == cube.ndim and var_cube.shape != cube.shape:
            raise SpecklepyValueError('coadd_frames()',
                                      argname='var_cube.shape',
                                      argvalue=str(var_cube.shape),
                                      expected=str(cube.shape))
        elif var_cube.ndim == cube.ndim - 1:
            if var_cube.shape != cube.shape[1:]:
                raise SpecklepyValueError('coadd_frames()',
                                          argname='var_cube.shape',
                                          argvalue=str(var_cube.shape),
                                          expected=str(cube.shape))

    # Compute the peak positions
    peak_indices = np.zeros((cube.shape[0], 2), dtype=int)
    for index, frame in enumerate(cube):
        if box is not None:
            frame = box(frame)
        peak_indices[index] = np.array(np.unravel_index(
            np.argmax(frame, axis=None), frame.shape),
                                       dtype=int)

    # Compute shifts from the peak indices
    peak_indices = peak_indices.transpose()
    xmean, ymean = np.mean(np.array(peak_indices), axis=1)
    xmean = int(xmean)
    ymean = int(ymean)
    shifts = np.array([xmean - peak_indices[0], ymean - peak_indices[1]])
    shifts = shifts.transpose()

    # Shift frames and add to coadded
    coadded = np.zeros(cube[0].shape)
    pad_vectors, ref_pad_vector = alignment.get_pad_vectors(
        shifts, cube_mode=False, return_reference_image_pad_vector=True)
    for index, frame in enumerate(cube):
        coadded += alignment.pad_array(
            frame,
            pad_vectors[index],
            mode='same',
            reference_image_pad_vector=ref_pad_vector)

    # Coadd variance cube (if not an image itself)
    if var_cube is not None:
        if var_cube.ndim == 3:
            var_coadded = np.zeros(coadded.shape)
            for index, frame in enumerate(var_cube):
                var_coadded += alignment.pad_array(
                    frame,
                    pad_vectors[index],
                    mode='same',
                    reference_image_pad_vector=ref_pad_vector)
        elif var_cube.ndim == 2:
            var_coadded = var_cube
        else:
            raise RuntimeError(
                f"var_cube has unexpected shape: {var_cube.shape}")
    else:
        var_coadded = None

    return coadded, var_coadded
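A self-contained sketch on a synthetic cube, with coadd_frames and the alignment module in scope (the peak drifts by one pixel per frame):

import numpy as np

cube = np.zeros((3, 32, 32))
for i in range(3):
    cube[i, 16 + i, 16] = 1.0
coadded, var_coadded = coadd_frames(cube)
print(np.unravel_index(np.argmax(coadded), coadded.shape))  # recentered peak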
Example #22
File: ssa.py (project: deepin00/specklepy)
def ssa(files,
        mode='same',
        reference_file=None,
        outfile=None,
        in_dir=None,
        tmp_dir=None,
        lazy_mode=True,
        box_indexes=None,
        debug=False,
        **kwargs):
    """Compute the SSA reconstruction of a list of files.

    The simple shift-and-add (SSA) algorithm makes use of the structure of typical speckle patterns, i.e.
    short-exposure point-spread functions (PSFs). These show multiple peaks resembling the diffraction-limited PSF of
    coherent fractions within the telescope aperture. Under good conditions or on small telescopes, there is typically
    one largest coherent atmospheric cell and therefore, speckle PSFs typically show one major intensity peak. The
    algorithm makes use of this fact and identifies the emission peak in a given observation frame, assuming that this
    always belongs to the same star, and aligns all frames on the coordinate of the emission peak.

    See Bates & Cady (1980) for references.

    Args:
        files (list or array_like):
            List of complete paths to the fits files that shall be considered for the SSA reconstruction.
        mode (str):
            Name of the reconstruction mode: In 'same' mode, the reconstruction covers the same field of view of the
            reference file. In 'full' mode, every patch of the sky that is covered by at least one frame will be
            contained in the final reconstruction.
        reference_file (str, int, optional):
            Path to a reference file or index of the file in files, relative to which the shifts are computed. See
            specklepy.core.alignment.get_shifts for details. Default is 0.
        outfile (specklepy.io.recfile, optional):
            Object to write the result to, if provided.
        in_dir (str, optional):
            Path to the files. `None` is substituted by an empty string.
        tmp_dir (str, optional):
            Path of a directory in which the temporary results are stored.
        lazy_mode (bool, optional):
            Set to False, to enforce the alignment of a single file with respect to the reference file. Default is True.
        box_indexes (list, optional):
            Constraining the search for the intensity peak to the specified box. Searching the full frames if not
            provided.
        debug (bool, optional):
            Show debugging information. Default is False.

    Returns:
        reconstruction (np.ndarray):
            The image reconstruction. The size depends on the mode argument.
    """

    logger.info("Starting SSA reconstruction...")
    # Check parameters
    if not isinstance(files, (list, np.ndarray)):
        if isinstance(files, str):
            files = [files]
        else:
            raise SpecklepyTypeError('ssa()',
                                     argname='files',
                                     argtype=type(files),
                                     expected='list')

    if isinstance(mode, str):
        if mode not in ['same', 'full', 'valid']:
            raise SpecklepyValueError('ssa()',
                                      argname='mode',
                                      argvalue=mode,
                                      expected="'same', 'full' or 'valid'")
    else:
        raise SpecklepyTypeError('ssa()',
                                 argname='mode',
                                 argtype=type(mode),
                                 expected='str')

    if reference_file is None:
        reference_file = files[0]
    elif isinstance(reference_file, int):
        reference_file = files[reference_file]
    elif not isinstance(reference_file, str):
        raise SpecklepyTypeError('ssa()',
                                 argname='reference_file',
                                 argtype=type(reference_file),
                                 expected='str or int')

    if outfile is None:
        pass
    elif isinstance(outfile, str):
        outfile = ReconstructionFile(files=files,
                                     filename=outfile,
                                     cards={"RECONSTRUCTION": "SSA"})
    elif isinstance(outfile, ReconstructionFile):
        pass
    else:
        raise SpecklepyTypeError('ssa()',
                                 argname='outfile',
                                 argtype=type(outfile),
                                 expected='str or ReconstructionFile')

    if in_dir is None:
        in_dir = ''
    reference_file = os.path.join(in_dir, reference_file)

    if tmp_dir is not None:
        if isinstance(tmp_dir, str) and not os.path.isdir(tmp_dir):
            os.makedirs(tmp_dir)

    if not isinstance(lazy_mode, bool):
        raise SpecklepyTypeError('ssa()',
                                 argname='lazy_mode',
                                 argtype=type(lazy_mode),
                                 expected='bool')

    if box_indexes is not None:
        box = Box(box_indexes)
    else:
        box = None

    var_ext = kwargs.get('variance_extension_name', 'VAR')

    if debug:
        logger.setLevel('DEBUG')
        logger.handlers[0].setLevel('DEBUG')
        logger.info("Set logging level to DEBUG")

    # Align reconstructions if multiple files are provided
    if lazy_mode and len(files) == 1:

        # Do not align just a single file
        with fits.open(os.path.join(in_dir, files[0])) as hdu_list:
            cube = hdu_list[0].data
            if var_ext in hdu_list:
                var_cube = hdu_list[var_ext].data
            else:
                var_cube = None
            reconstruction, reconstruction_var = coadd_frames(
                cube, var_cube=var_cube, box=box)

    else:

        # Compute temporary reconstructions of the individual cubes
        tmp_files = []
        for index, file in enumerate(files):
            with fits.open(os.path.join(in_dir, file)) as hdu_list:
                cube = hdu_list[0].data
                if var_ext in hdu_list:
                    var_cube = hdu_list[var_ext].data
                    logger.debug(
                        f"Found variance extension {var_ext} in file {file}")
                else:
                    logger.debug(
                        f"Did not find variance extension {var_ext} in file {file}"
                    )
                    var_cube = None
                tmp, tmp_var = coadd_frames(cube, var_cube=var_cube, box=box)

            if debug:
                imshow(box(tmp), norm='log')

            tmp_file = os.path.basename(file).replace(".fits", "_ssa.fits")
            tmp_file = os.path.join(tmp_dir, tmp_file)
            logger.info(
                "Saving interim SSA reconstruction of cube to {}".format(
                    tmp_file))
            tmp_file_object = Outfile(tmp_file, data=tmp, verbose=True)

            # Store variance of temporary reconstruction
            if tmp_var is not None:
                tmp_file_object.new_extension(var_ext, data=tmp_var)
                del tmp_var
            tmp_files.append(tmp_file)

        # Align tmp reconstructions and add up
        file_shifts, image_shape = alignment.get_shifts(
            tmp_files,
            reference_file=reference_file,
            return_image_shape=True,
            lazy_mode=True)
        pad_vectors, ref_pad_vector = alignment.get_pad_vectors(
            file_shifts,
            cube_mode=(len(image_shape) == 3),
            return_reference_image_pad_vector=True)

        # Iterate over file-wise reconstructions
        reconstruction = None
        reconstruction_var = None
        for index, file in enumerate(tmp_files):

            # Read data
            with fits.open(file) as hdu_list:
                tmp_image = hdu_list[0].data
                if var_ext in hdu_list:
                    tmp_image_var = hdu_list[var_ext].data
                else:
                    tmp_image_var = None

            # Initialize or co-add reconstructions and var images
            if reconstruction is None:
                reconstruction = alignment.pad_array(
                    tmp_image,
                    pad_vectors[index],
                    mode=mode,
                    reference_image_pad_vector=ref_pad_vector)
                if tmp_image_var is not None:
                    reconstruction_var = alignment.pad_array(
                        tmp_image_var,
                        pad_vectors[index],
                        mode=mode,
                        reference_image_pad_vector=ref_pad_vector)
            else:
                reconstruction += alignment.pad_array(
                    tmp_image,
                    pad_vectors[index],
                    mode=mode,
                    reference_image_pad_vector=ref_pad_vector)
                if tmp_image_var is not None:
                    reconstruction_var += alignment.pad_array(
                        tmp_image_var,
                        pad_vectors[index],
                        mode=mode,
                        reference_image_pad_vector=ref_pad_vector)
    logger.info("Reconstruction finished...")

    # Save the result to an Outfile
    if outfile is not None:
        outfile.data = reconstruction
        if reconstruction_var is not None:
            outfile.new_extension(name=var_ext, data=reconstruction_var)

    # Return reconstruction (and the variance map if computed)
    if reconstruction_var is not None:
        return reconstruction, reconstruction_var
    return reconstruction
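A usage sketch (the file names are hypothetical):

reconstruction = ssa(['cube_1.fits', 'cube_2.fits'],  # hypothetical files
                     mode='same',
                     in_dir='data',
                     tmp_dir='tmp',
                     outfile='ssa_reconstruction.fits')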
Example #23
    def identify_sequences(self, source='sky'):
        """Identify observation sequences.

        Args:
            source (str, optional):
                Observation type of the images that shall be used to measure the sky background. Options are 'sky'
                (default) and 'science'.

        Returns:
            sequences (list of Sequence):
                List of observing sequences.
        """

        # Type check
        if isinstance(source, str):
            if source not in ['sky', 'science']:
                raise SpecklepyValueError('identify sequences',
                                          argname='source',
                                          argvalue=source,
                                          expected="'sky' or 'science'")
        else:
            raise SpecklepyTypeError('identify sequences',
                                     argname='source',
                                     argtype=type(source),
                                     expected='str')

        # Identify the observing sequences
        sequences = []
        for setup in self.setups:
            for object in self.objects:
                # Query names and time stamps of science and sky files
                sky_files = self.filter({
                    'OBSTYPE': source.upper(),
                    'OBJECT': object,
                    'SETUP': setup
                })
                sky_time_stamps = self.filter(
                    {
                        'OBSTYPE': source.upper(),
                        'OBJECT': object,
                        'SETUP': setup
                    },
                    namekey='DATE')
                science_files = self.filter({
                    'OBSTYPE': 'SCIENCE',
                    'OBJECT': object,
                    'SETUP': setup
                })
                science_time_stamps = self.filter(
                    {
                        'OBSTYPE': 'SCIENCE',
                        'OBJECT': object,
                        'SETUP': setup
                    },
                    namekey='DATE')

                # Test the number of source files
                if len(sky_files) == 0:
                    logger.warning(
                        f"Did not find any sky observations for object {object} in setup {setup}. No sky "
                        f"subtraction will be applied!")
                else:
                    # Store the information in a new sequence
                    sequences.append(
                        Sequence(sky_files=sky_files,
                                 science_files=science_files,
                                 file_path=self.in_dir,
                                 sky_time_stamps=sky_time_stamps,
                                 science_time_stamps=science_time_stamps,
                                 source=source,
                                 object=object,
                                 setup=setup))
        return sequences
Example #24
def get_shift(image,
              reference_image=None,
              is_fourier_transformed=False,
              mode='correlation',
              debug=False):
    """Estimate the shift between an image and a reference image.

    Estimate the relative shift between an image and a reference image by means of a 2D correlation
    ('correlation' mode) or by comparison of the emission peaks ('peak' or 'maximum' modes).

    Args:
        image (np.ndarray):
            2D array of the image to be shifted.
        reference_image (np.ndarray):
            2D array of the reference image of the shift.
        is_fourier_transformed (bool):
            Indicate whether the reference image is already Fourier transformed. This is implemented to save
            computation by computing that transform only once.
        mode (str, optional):
            Mode of the shift estimate. In 'correlation' mode, a 2D correlation is used to estimate the shift of the
            array. This is computationally much more expensive than the identical 'maximum' or 'peak' modes, which
            simply identify the coordinates of the emission peaks and return the difference. Though these modes may be
            fooled by reference sources of similar brightness. Default is 'correlation'.
        debug (bool, optional):
            Set to True to inspect intermediate results. Default is False.

    Returns:
        shift (tuple):
            Tuple of shift indices for each axis.
    """

    # Check input parameters
    if not isinstance(image, np.ndarray) or image.ndim != 2:
        raise TypeError(
            f"Image input must be 2D numpy.ndarray, but was provided as {type(image)}"
        )
    if not isinstance(reference_image, np.ndarray) or reference_image.ndim != 2:
        raise TypeError(
            f"Reference image input must be 2D numpy.ndarray, but was provided as {type(reference_image)}"
        )
    if not isinstance(is_fourier_transformed, bool):
        raise SpecklepyTypeError('get_shift()',
                                 argname='is_fourier_transformed',
                                 argtype=type(is_fourier_transformed),
                                 expected='bool')
    if isinstance(mode, str):
        if mode not in ['correlation', 'maximum', 'peak']:
            raise SpecklepyValueError(
                'get_shift()',
                argname='mode',
                argvalue=mode,
                expected="'correlation', 'maximum' or 'peak'")
    else:
        raise SpecklepyTypeError('get_shift()',
                                 argname='mode',
                                 argtype=type(mode),
                                 expected='str')

    # Simple comparison of the peaks in the images
    if mode == 'maximum' or mode == 'peak':
        peak_image = np.unravel_index(np.argmax(image, axis=None), image.shape)
        peak_ref_image = np.unravel_index(np.argmax(reference_image, axis=None), reference_image.shape)
        return peak_ref_image[0] - peak_image[0], peak_ref_image[1] - peak_image[1]

    # Using correlation of the two images
    elif mode == 'correlation':
        # Get the Fourier transformed reference image for cross-correlation
        if not is_fourier_transformed:
            f_reference_image = np.fft.fft2(reference_image)
        else:
            f_reference_image = reference_image

        # Fourier transform the image
        f_image = np.conjugate(np.fft.fft2(image))

        # Compute the 2-dimensional correlation
        correlation = np.fft.ifft2(np.multiply(f_reference_image, f_image))
        correlation = np.fft.fftshift(correlation)
        if debug:
            imshow(np.abs(correlation), title='FFT shifted correlation')

        # Derive the shift from the correlation
        shift = np.unravel_index(np.argmax(correlation), correlation.shape)
        shift = tuple(x - int(correlation.shape[i] / 2)
                      for i, x in enumerate(shift))
        return shift
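
A quick way to sanity-check the 'correlation' mode is to displace a synthetic frame by a known offset and recover it. A minimal sketch, assuming a 64x64 test frame with a single Gaussian 'star' (the frame and the offset are made up for illustration); by the convention above, the returned shift is the offset that maps the image back onto the reference, i.e. the negative of the applied roll:

import numpy as np

# Synthetic 64x64 frame with a single Gaussian peak (illustrative assumption)
y, x = np.mgrid[0:64, 0:64]
reference = np.exp(-((x - 32)**2 + (y - 32)**2) / 8)

# Displace the frame by a known offset and recover it
image = np.roll(reference, shift=(5, -3), axis=(0, 1))
print(get_shift(image, reference_image=reference, mode='correlation'))  # (-5, 3)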
Example #25
0
def get_shifts(files,
               reference_file=None,
               mode='correlation',
               lazy_mode=True,
               return_image_shape=False,
               in_dir=None,
               debug=False):
    """Computes the the relative shift of data cubes relative to a reference
    image.

    This function iterates over a list of files and uses the module function get_shift in 'correlation' mode to compute
    the relative shifts of files with respect to a reference file.

    Args:
        files (list or array_like):
            List of files to align.
        reference_file (str, int, optional):
            Path to a reference file or index of the file in files, relative to which the shifts are computed. Default
            is 0.
        mode (str, optional):
            Mode of the shift estimate. In 'correlation' mode, a 2D cross-correlation is used to estimate the shift
            of the array. This is computationally much more expensive than the equivalent 'maximum' and 'peak' modes,
            which simply identify the coordinates of the emission peaks and return the difference. However, these
            modes may be fooled by reference sources of similar brightness. Passed to get_shift(). Default is
            'correlation'.
        lazy_mode (bool, optional):
            Set to False to enforce the alignment of a single file with respect to the reference file. Default is True.
        return_image_shape (bool, optional):
            Set to True to also return the shape of the anticipated output image. Default is False.
        in_dir (str, optional):
            Path to the files. `None` is substituted by an empty string.
        debug (bool, optional):
            Set to True to inspect the 2D correlation. Default is False.

    Returns:
        shifts (list):
            List of shifts for each file relative to the reference file.
    """

    # Check input parameters
    if not isinstance(files, (list, np.ndarray)):
        if isinstance(files, str):
            files = [files]
        else:
            raise SpecklepyTypeError('get_shifts()',
                                     argname='files',
                                     argtype=type(files),
                                     expected='list')

    if reference_file is None:
        reference_file = files[0]
    elif isinstance(reference_file, int):
        reference_file = files[reference_file]
    elif not isinstance(reference_file, str):
        raise SpecklepyTypeError('get_shifts()',
                                 argname='reference_file',
                                 argtype=type(reference_file),
                                 expected='str')

    if isinstance(mode, str):
        if mode not in ['correlation', 'maximum', 'peak']:
            raise SpecklepyValueError(
                'get_shifts()',
                argname='mode',
                argvalue=mode,
                expected="'correlation', 'maximum' or 'peak'")
    else:
        raise SpecklepyTypeError('get_shifts()',
                                 argname='mode',
                                 argtype=type(mode),
                                 expected='str')

    if not isinstance(lazy_mode, bool):
        raise SpecklepyTypeError('get_shifts()',
                                 argname='lazy_mode',
                                 argtype=type(lazy_mode),
                                 expected='bool')

    if not isinstance(return_image_shape, bool):
        raise SpecklepyTypeError('get_shifts()',
                                 argname='return_image_shape',
                                 argtype=type(return_image_shape),
                                 expected='bool')

    if in_dir is None:
        in_dir = ''

    # Skip computations if only one file is provided
    if lazy_mode and len(files) == 1:
        logger.info("Only one data cube is provided, nothing to align.")
        shifts = [(0, 0)]
        image_shape = fits.getdata(os.path.join(in_dir, files[0])).shape
        image_shape = (image_shape[-2], image_shape[-1])

    # Otherwise estimate shifts
    else:
        shifts = []

        # Identify reference file and Fourier transform the integrated image
        logger.info(
            f"Computing relative shifts between data cubes. Reference file is {reference_file}"
        )
        reference_image = fits.getdata(os.path.join(in_dir, reference_file))
        if reference_image.ndim == 3:
            # Integrating over time axis if reference image is a cube
            reference_image = np.sum(reference_image, axis=0)
        f_reference_image = np.fft.fft2(reference_image)
        image_shape = reference_image.shape
        del reference_image

        # Iterate over files and estimate shift via 2D correlation of the integrated cubes
        for index, file in enumerate(files):
            if file == reference_file:
                shift = (0, 0)
            else:
                image = fits.getdata(os.path.join(in_dir, file))
                if image.ndim == 3:
                    image = np.sum(image, axis=0)
                shift = get_shift(image,
                                  reference_image=f_reference_image,
                                  is_fourier_transformed=True,
                                  mode=mode,
                                  debug=debug)
            shifts.append(shift)
            logger.info(f"Identified a shift of {shift} for file {file}")
        logger.info(f"Identified the following shifts:\n\t{shifts}")

    if return_image_shape:
        return shifts, image_shape
    else:
        return shifts
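
A hedged usage sketch (the file names and directory are placeholders): align three data cubes against the first one and also retrieve the frame shape for later padding:

files = ['cube_1.fits', 'cube_2.fits', 'cube_3.fits']  # hypothetical file names
shifts, image_shape = get_shifts(files, reference_file=0, mode='correlation',
                                 return_image_shape=True, in_dir='raw/')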
Example #26
0
    def __init__(self, in_files, mode='same', reference_image=None, out_file=None, in_dir=None, tmp_dir=None,
                 alignment_method='collapse', var_ext=None, box_indexes=None, debug=False):
        """Create a Reconstruction instance.

        Args:
            in_files (list):
                List of input data cubes.
            mode (str, optional):
                Reconstruction mode, defines the final image size and can be `full`, `same`, or `valid`. The final
                image size is derived as follows:
                - `full`:
                    The reconstruction image covers every patch of the sky that is covered by at least one frame in the
                    input data.
                - `same`:
                    The reconstruction image covers the same field of view as the image in the reference file.
                - `valid`:
                    The reconstruction image covers only that field that is covered by all images in the input files.
            reference_image (int or str, optional):
                The index in the `in_files` list or the name of the image serving as reference in 'same' mode.
            out_file (str, optional):
                Name of an output file to store the reconstructed image in.
            in_dir (str, optional):
                Path to the `in_files`.
            tmp_dir (str, optional):
                Path to the directory for storing temporary products.
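            alignment_method (str, optional):
                Method for creating the long exposures used in the initial alignment. Default is 'collapse'.
            var_ext (str, optional):
                Name of the FITS extension that stores the variance map. Default is None.
            box_indexes (list, optional):
                Index limits of a box to restrict the alignment analysis to. Default is None.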
            debug (bool, optional):
                Show debugging information.
        """

        # Check input parameter types
        if not isinstance(in_files, (list, np.ndarray)):
            raise SpecklepyTypeError('Reconstruction', 'in_files', type(in_files), 'list')
        if not isinstance(mode, str):
            raise SpecklepyTypeError('Reconstruction', 'mode', type(mode), 'str')
        if out_file is not None and not isinstance(out_file, str):
            raise SpecklepyTypeError('Reconstruction', 'out_file', type(out_file), 'str')

        # Check input parameter values
        if mode not in self.supported_modes:
            raise SpecklepyValueError('Reconstruction', 'mode', mode, f"in {self.supported_modes}")

        # Store input data
        self.in_files = in_files
        self.mode = mode
        self.out_file = out_file if out_file is not None else 'reconstruction.fits'
        self.reference_image = reference_image if reference_image is not None else 0
        self.in_dir = in_dir if in_dir is not None else ''
        self.tmp_dir = tmp_dir if tmp_dir is not None else ''
        self.var_ext = var_ext  # if var_ext is not None else 'VAR'
        self.box = Box(box_indexes) if box_indexes is not None else None

        # Retrieve name of reference file
        self.reference_file = self.identify_reference_file()

        # Derive shape of individual input frames
        single_cube_mode = len(self.in_files) == 1
        example_frame = fits.getdata(os.path.join(self.in_dir, self.in_files[0]))
        if example_frame.ndim == 3:
            example_frame = example_frame[0]
        self.frame_shape = example_frame.shape

        # Initialize image
        if single_cube_mode:
            self.image = np.zeros(self.frame_shape)
            self.shifts = (0, 0)
        else:
            # Compute SSA reconstructions of cubes or collapse cubes for initial alignments
            self.long_exp_files = self.create_long_exposures(alignment_method=alignment_method)

            # Identify reference tmp file
            self.reference_tmp_file = self.identify_reference_long_exposure_file()

            # Estimate relative shifts
            self.shifts = alignment.get_shifts(files=self.long_exp_files, reference_file=self.reference_tmp_file,
                                               lazy_mode=True, return_image_shape=False, in_dir=self.tmp_dir,
                                               debug=debug)

            # Derive corresponding padding vectors
            self.pad_vectors, self.reference_pad_vector = \
                alignment.get_pad_vectors(shifts=self.shifts, cube_mode=False, return_reference_image_pad_vector=True)

            # Derive corresponding image sizes
            self.image = self.initialize_image()

        # Initialize the variance map
        self.var = np.zeros(self.image.shape) if self.var_ext is not None else None

        # Initialize output file and create an extension for the variance
        self.out_file = ReconstructionFile(files=self.in_files, filename=self.out_file, shape=self.image.shape,
                                           in_dir=self.in_dir, cards={"RECONSTRUCTION": "SSA"})
        if self.var is not None:
            self.out_file.new_extension(name=self.var_ext, data=self.var)
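
A minimal instantiation sketch, assuming two input cubes in a raw/ directory (all file names and paths are placeholders):

reconstruction = Reconstruction(in_files=['cube_1.fits', 'cube_2.fits'],
                                mode='same', reference_image=0,
                                out_file='reconstruction.fits',
                                in_dir='raw/', tmp_dir='tmp/')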
Example #27
0
    def __init__(self,
                 file,
                 data=None,
                 prefix=None,
                 path=None,
                 reduction=None,
                 last_reduction=None,
                 header_card_prefix="HIERARCH SPECKLEPY"):
        """Class that carries the link to a file for data reduction products.

        Args:
            file (str):
                Input file that will serve as the source for the header and raw data.
            data (np.ndarray, optional):
                Data that will be stored as the new PrimaryHDU.
            prefix (str, optional):
                Prefix that will be combined with the 'file' argument to form the name of the new file.
            path (str, optional):
                Target path under which the new file will be stored.
            reduction (str, optional):
                Current reduction step, will be stored to the file header.
            last_reduction (str, optional):
                Last reduction step, will be used as a name for the new fits extension.
            header_card_prefix (str, optional):
                Default prefix for header cards.
        """

        # Check input parameters
        if isinstance(file, str):
            self.parent_file = file
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='file',
                                     argtype=type(file),
                                     expected='str')

        if data is None or isinstance(data, np.ndarray):
            self._data = data
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='data',
                                     argtype=type(data),
                                     expected='np.ndarray')

        if prefix is None:
            self.prefix = ""
        elif isinstance(prefix, str):
            self.prefix = prefix
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='prefix',
                                     argtype=type(prefix),
                                     expected='str')

        if path is None or isinstance(path, str):
            self.path = path
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='path',
                                     argtype=type(path),
                                     expected='str')

        if reduction is None or isinstance(reduction, str):
            self.reduction = reduction
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='reduction',
                                     argtype=type(reduction),
                                     expected='str')

        if last_reduction is None:
            self.last_reduction = "RAW"
        elif isinstance(last_reduction, str):
            self.last_reduction = last_reduction
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='last_reduction',
                                     argtype=type(last_reduction),
                                     expected='str')

        if header_card_prefix is None or isinstance(header_card_prefix, str):
            self.header_card_prefix = header_card_prefix
        else:
            raise SpecklepyTypeError('ReductionFile',
                                     argname='header_card_prefix',
                                     argtype=type(header_card_prefix),
                                     expected='str')

        # Create file name
        self.filename = self.prefix + os.path.basename(
            self.parent_file)  # Make sure to get rid of the path

        # Read header information and data from parent file
        with fits.open(self.parent_file) as hdu_list:
            # Copy parent file data into extensions
            extensions = []
            for hdu in hdu_list:
                if hdu.name == 'PRIMARY':
                    name = self.last_reduction
                else:
                    name = hdu.name
                ext = {'name': name, 'data': hdu.data, 'header': hdu.header}
                extensions.append(ext)

            # Construct primary HDU
            header = hdu_list[0].header

            if 'PIPELINE' not in header.keys():
                # Parent file is not a ReductionFile
                header.set('PIPELINE', 'SPECKLEPY')
            if reduction is not None:
                header.set(reduction, str(datetime.now()))  # Store the time stamp of the current reduction step

            if self._data is None:
                primary_data = hdu_list[0].data
            else:
                primary_data = self._data
            del self._data

        # Pass to OutFile class
        super().__init__(filename=self.filename,
                         path=self.path,
                         data=primary_data,
                         extensions=extensions,
                         header=header,
                         cards=None,
                         timestamp=False,
                         header_card_prefix=header_card_prefix)
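
A hedged example of deriving a reduction product from a raw frame (the file name, path, and reduction label are placeholders):

product = ReductionFile(file='raw/science_0001.fits',  # hypothetical input file
                        prefix='r', path='products/',
                        reduction='FLATFIELDING', last_reduction='RAW')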
Example #28
0
def identify_setups(filelist, keywords, return_setups=False):
    """Identify distinct observational setups in a list of files.

    Setups are identified by the distinct combinations of values of the given header keywords.

    Args:
        filelist (astropy.table.Table):
            Table of the files to be grouped, containing the header keywords as columns.
        keywords (list of str):
            Header keywords by which the setups are distinguished.
        return_setups (bool):
            Set to True to also return the dict of distinct setups. Default is False.
    """

    # Check input parameters
    if not isinstance(filelist, Table):
        raise SpecklepyTypeError('identify_setups', 'filelist', type(filelist),
                                 'astropy.table.Table')

    if not isinstance(keywords, list):
        raise SpecklepyTypeError('identify_setups', 'keywords', type(keywords),
                                 'list')

    if not isinstance(return_setups, bool):
        raise SpecklepyTypeError('identify_setups', 'return_setups',
                                 type(return_setups), 'bool')

    # Identify setups key by key
    logger.info(
        "Identifying distinct observational setups in the file list...")
    filelist['Setup'] = [None] * len(filelist['FILE'])

    for key in keywords:
        unique = np.unique(filelist[key].data)
        logging.info("Identified {} setups by keyword {}:".format(
            len(unique), key))
        logging.info("\t{}".format(unique))
        if len(unique) == 1:
            continue

        for index, setup in enumerate(unique):
            for row in filelist:
                if row[key] == setup:
                    if row['Setup'] is None:
                        row['Setup'] = [str(index)]
                    else:
                        row['Setup'].append(str(index))

    for row in filelist:
        # Rows remain None if all files share the same value for every keyword
        row['Setup'] = ''.join(row['Setup']) if row['Setup'] is not None else '0'

    # Overwrite setup keys by length-1 string
    combinations = np.unique(filelist['Setup'].data)
    for index, combination in enumerate(combinations):
        row_indizes = np.where(filelist['Setup'].data == combination)
        filelist['Setup'][row_indizes] = string.ascii_uppercase[index]

    # Return
    if return_setups:
        # Create the dict of distinct setups, mapping each identifier to its files
        setups_dict = {}
        for setup in np.unique(filelist['Setup'].data):
            setups_dict[setup] = list(filelist['FILE'][filelist['Setup'].data == setup])
        return filelist, setups_dict
    else:
        return filelist
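
A short usage sketch with a toy file table (all column values are made up): files sharing the same FILTER and EXPTIME end up in the same setup:

from astropy.table import Table

filelist = Table({'FILE': ['a.fits', 'b.fits', 'c.fits'],
                  'FILTER': ['H', 'H', 'K'],
                  'EXPTIME': [10.0, 10.0, 60.0]})
filelist = identify_setups(filelist, keywords=['FILTER', 'EXPTIME'])
print(filelist['Setup'])  # expected: A, A, B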
Example #29
0
def pad_array(array, pad_vector, mode='same', reference_image_pad_vector=None):
    """Pads an array according to the pad_vector and crops the image given the
    mode.

    Pads an array with zeros to match a desired field size. Intermediately, it always creates a 'full' image and only
    in 'same' mode it crops the edges such that the returned array covers only the field of the reference image.

    Args:
        array (np.ndarray):
            Input array that shall be padded to match the 'full' or 'same' fields.
        pad_vector (list):
            List of padding vectors, as obtained from get_pad_vectors().
        mode (str, optional):
            Define the size of the output image as 'same' to the reference image or expanding to include the 'full'
            covered field.
        reference_image_pad_vector (tuple or list, optional):
            Used in `same` mode to locate the field of the reference image and crop everything beyond it.

    Returns:
        padded (np.ndarray):
            Padded array, matching the field of the reference image in 'same'
            mode, or the complete field in 'full' mode.
    """

    # Check input parameters
    if isinstance(array, np.ndarray):
        if array.ndim not in [2, 3]:
            raise SpecklepyValueError('pad_array()',
                                      argname='array.ndim',
                                      argvalue=array.ndim,
                                      expected='2 or 3')
    else:
        raise SpecklepyTypeError('pad_array()',
                                 argname='array',
                                 argtype=type(array),
                                 expected='np.ndarray')

    # Pad the array to cover the 'full' field
    padded = np.pad(array, pad_vector, mode='constant')

    # Crop the image according to the desired mode
    if mode == 'same':
        # Take reference pad vector and adapt to correctly limit the image
        _r = reference_image_pad_vector
        # Pick only those pixels, covered by the reference image
        if array.ndim == 2:
            padded = padded[_r[0][0]:_adapt_max_coordinate(_r[0][1]),
                            _r[1][0]:_adapt_max_coordinate(_r[1][1])]
        else:
            padded = padded[:, _r[0][0]:_adapt_max_coordinate(_r[0][1]),
                            _r[1][0]:_adapt_max_coordinate(_r[1][1])]

    elif mode == 'full':
        # There is nothing to crop in 'full' mode
        pass

    elif mode == 'valid':
        raise NotImplementedError(
            "specklepy.core.alignment.pad_array does not support the 'valid' mode yet!"
        )

    return padded
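
A small numeric sketch of the 'full' mode; the pad vector follows the np.pad convention of per-axis (before, after) tuples, and 'same' mode would additionally need the reference_image_pad_vector from get_pad_vectors():

import numpy as np

frame = np.ones((4, 4))
padded = pad_array(frame, pad_vector=((2, 0), (0, 3)), mode='full')
print(padded.shape)  # (6, 7): 2 rows prepended, 3 columns appended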
Example #30
0
    def __init__(self,
                 file_list,
                 in_dir=None,
                 out_dir=None,
                 out_prefix=None,
                 **kwargs):
        """Create a FileArchive instance.

        Args:
            file_list (str, list):
                Path to list of files or generic file path. Can also be provided as list type.
            in_dir (str, optional):
                Path to the raw/ input data.
            out_dir (str, optional):
                Path to the product/ output data.
            out_prefix (str, optional):
                Prefix of the product/ output data.
        """

        # Store in and out paths
        if in_dir is None:
            self.in_dir = './'
        else:
            self.in_dir = in_dir
        if out_dir is None:
            self.out_dir = './'
        else:
            self.out_dir = out_dir
        if out_prefix is None:
            self.out_prefix = ''
        else:
            self.out_prefix = out_prefix

        # Interpret the file list input
        if isinstance(file_list, str):
            # Search for files
            files = glob.glob(file_list)
            files.sort()
            if len(files) == 0:
                sys.tracebacklimit = 0
                raise FileNotFoundError(
                    "FileArchive did not find any file matching {!r}.".format(file_list))
            else:
                logger.info(
                    "FileArchive found {} file(s) matching {!r}.".format(
                        len(files), file_list))

            if len(files) == 1 and not self.is_fits_file(files[0]):
                logger.info(
                    "Input file is not fits type. FileArchive assumes that input file {!r} contains file "
                    "names.".format(files[0]))
                self.table = self.read_table_file(files[0])
            else:
                self.table = self.gather_table_from_list(files=files, **kwargs)
                self.in_dir = os.path.dirname(files[0])

        elif isinstance(file_list, list):
            logger.info("FileArchive received a list of files.")
            self.table = self.gather_table_from_list(files=file_list, **kwargs)

        else:
            raise SpecklepyTypeError("FileArchive", 'file_list',
                                     type(file_list), 'str')

        # Log identified input files
        logger.debug("FileArchive lists the following files:")
        logger.debug(str(self.table))

        # Initialize the index for iteration
        self.index = 0

        # Initialize the list of product files
        self.product_files = None
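
A hedged instantiation sketch (the glob pattern and directories are placeholders):

archive = FileArchive('raw/*.fits', out_dir='products/', out_prefix='r')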