Example no. 1
# Imports needed by this example (module paths assumed from the panoptes-utils package layout).
import numpy as np
from astropy.io import fits

from panoptes.utils.images import fits_utils
from panoptes.utils.images.bayer import RGB


def save_rgb_bg_fits(rgb_bg_data,
                     output_filename,
                     header=None,
                     fpack=True,
                     overwrite=True):
    """Save a FITS file containing a combined background as well as separate channels.

    Args:
        rgb_bg_data (list[photutils.Background2D]): The RGB background data as
            returned by calling `panoptes.utils.images.bayer.get_rgb_background`
            with `return_separate=True`.
        output_filename (str): The output name for the FITS file.
        header (astropy.io.fits.Header): FITS header to be saved with the file.
        fpack (bool): If the FITS file should be compressed, default True.
        overwrite (bool): If FITS file should be overwritten, default True.
    """

    # Get combined data for Primary HDU
    combined_bg = np.array([
        np.ma.array(data=d.background, mask=d.mask).filled(0)
        for d in rgb_bg_data
    ]).sum(0)

    header = header or fits.Header()

    # Save as int16.
    header['BITPIX'] = 16

    # Combined background is primary hdu.
    primary = fits.PrimaryHDU(combined_bg, header=header)
    primary.scale('int16')
    hdu_list = [primary]

    for color, bg in zip(RGB, rgb_bg_data):
        h0 = fits.Header()
        h0['COLOR'] = color.name.lower()

        h0['IMGTYPE'] = 'background'
        img0 = fits.ImageHDU(bg.background, header=h0)
        img0.scale('int16')
        hdu_list.append(img0)

        h0['IMGTYPE'] = 'background_rms'
        img1 = fits.ImageHDU(bg.background_rms, header=h0)
        img1.scale('int16')
        hdu_list.append(img1)

    hdul = fits.HDUList(hdu_list)
    hdul.writeto(output_filename, overwrite=overwrite)

    if fpack:
        output_filename = fits_utils.fpack(output_filename)

    return output_filename
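A minimal usage sketch for the function above, with placeholder filenames; it assumes `get_rgb_background` accepts the path of a FITS file (only the `return_separate=True` behaviour is documented in the docstring):

# Usage sketch (placeholder paths; get_rgb_background() signature assumed).
from astropy.io import fits
from panoptes.utils.images import bayer

# One photutils.Background2D per colour channel, as documented above.
rgb_backgrounds = bayer.get_rgb_background('image.fits', return_separate=True)

# Carry the original image header over to the background file.
original_header = fits.getheader('image.fits')

bg_filename = save_rgb_bg_fits(rgb_backgrounds,
                               'image-background.fits',
                               header=original_header)
# With fpack=True (the default) the returned path ends in .fits.fz.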
Example no. 2
def test_no_overwrite_fpack(solved_fits_file):
    new_file = solved_fits_file.replace('solved', 'solved_copy')
    copy_file = shutil.copyfile(solved_fits_file, new_file)

    # Unpack the file. This removes the packed version.
    uncompressed = fits_utils.funpack(copy_file)

    # Copy file again so now the packed version exists alongside unpacked.
    copy_file = shutil.copyfile(solved_fits_file, new_file)

    # Denying overwrite raises an error.
    with pytest.raises(FileExistsError):
        _ = fits_utils.fpack(uncompressed, overwrite=False)

    # Default is overwrite=True.
    compressed = fits_utils.fpack(uncompressed)

    # Cleanup test.
    for file in [copy_file, uncompressed, compressed]:
        with suppress(FileNotFoundError):
            os.remove(file)
Example no. 3
def test_fpack(solved_fits_file):
    new_file = solved_fits_file.replace('solved', 'solved_copy')
    copy_file = shutil.copyfile(solved_fits_file, new_file)
    info = os.stat(copy_file)
    assert info.st_size > 0.

    uncompressed = fits_utils.funpack(copy_file, verbose=True)
    assert os.stat(uncompressed).st_size > info.st_size

    compressed = fits_utils.fpack(uncompressed, verbose=True)
    assert os.stat(compressed).st_size == info.st_size

    os.remove(copy_file)
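For reference, the packing round trip these tests exercise, as a minimal sketch with a placeholder path (import path assumed from the package layout):

# Sketch only: the file must already exist for these calls to succeed.
from panoptes.utils.images import fits_utils

fz_path = fits_utils.fpack('/tmp/example.fits')   # returns '/tmp/example.fits.fz'
fits_path = fits_utils.funpack(fz_path)           # returns '/tmp/example.fits' (removes the .fz, per the tests above)
# fpack() defaults to overwrite=True; with overwrite=False it raises FileExistsError
# if the packed file is already on disk.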
Example no. 4
def clean_observation_dir(dir_name,
                          remove_jpgs=False,
                          include_timelapse=True,
                          timelapse_overwrite=False,
                          **kwargs):
    """Clean an observation directory.
    For the given `dir_name`, will:
        * Compress FITS files
        * Remove `.solved` files
        * Create timelapse from JPG files if present (optional, default True)
        * Remove JPG files (optional, default False).
    Args:
        dir_name (str): Full path to observation directory.
        remove_jpgs (bool, optional): If JPGs should be removed after making timelapse,
            default False.
        include_timelapse (bool, optional): If a timelapse should be created, default True.
        timelapse_overwrite (bool, optional): If timelapse file should be overwritten,
            default False.
        **kwargs: Can include `verbose`.
    """
    def _glob(s):
        return glob(os.path.join(dir_name, s))

    logger.info("Cleaning dir: {}".format(dir_name))

    # Pack the fits files
    logger.debug("Packing FITS files")
    for f in _glob('*.fits'):
        try:
            fits_utils.fpack(f)
        except Exception as e:  # pragma: no cover
            logger.warning('Could not compress fits file: {!r}'.format(e))

    # Remove .solved files
    logger.debug('Removing .solved files')
    for f in _glob('*.solved'):
        with suppress(OSError):
            os.remove(f)

    try:
        jpg_list = _glob('*.jpg')

        if len(jpg_list) > 0:

            # Create timelapse
            if include_timelapse:
                try:
                    logger.debug('Creating timelapse for {}'.format(dir_name))
                    video_file = make_timelapse(dir_name,
                                                overwrite=timelapse_overwrite)
                    logger.debug('Timelapse created: {}'.format(video_file))
                except Exception as e:
                    logger.debug("Problem creating timelapse: {}".format(e))

            # Remove jpgs
            if remove_jpgs:
                logger.debug('Removing jpgs')
                for f in jpg_list:
                    with suppress(OSError):
                        os.remove(f)
    except Exception as e:
        logger.warning(
            'Problem with cleanup creating timelapse: {!r}'.format(e))
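A minimal call sketch, with a placeholder observation directory:

# Usage sketch (placeholder path): compress FITS files, drop .solved files,
# build a timelapse from any JPGs, and keep the JPGs afterwards.
clean_observation_dir('/var/panoptes/images/fields/TargetName/20200101T000000',
                      remove_jpgs=False,
                      include_timelapse=True,
                      timelapse_overwrite=True)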
Example no. 5
    def process_exposure(self,
                         metadata,
                         observation_event,
                         compress_fits=None,
                         record_observations=None,
                         make_pretty_images=None):
        """ Processes the exposure.

        Performs the following steps:

            1. First checks to make sure that the file exists on the file system.
            2. Calls `_process_fits` with the filename and info, which is specific to each camera.
            3. Makes pretty images if requested.
            4. Records observation metadata if requested.
            5. Compresses FITS files if requested.
            6. Sets the observation_event.

        If the camera is a primary camera, a jpeg image is extracted and metadata is saved to
        the database's `current` collection. Metadata is saved to the `observations` collection
        for all images.

        Args:
            metadata (dict): Header metadata saved for the image
            observation_event (threading.Event): An event that is set signifying that the
                camera is done with this exposure
            compress_fits (bool or None): If FITS files should be fpacked into .fits.fz.
                If None (default), checks the `observations.compress_fits` config-server key.
            record_observations (bool or None): If observation metadata should be saved.
                If None (default), checks the `observations.record_observations`
                config-server key.
            make_pretty_images (bool or None): If a jpg should be made from the raw image.
                If None (default), checks the `observations.make_pretty_images`
                config-server key.

        Raises:
            FileNotFoundError: If the FITS file isn't at the specified location.
        """
        # Wait for exposure to complete. Timeout handled by exposure thread.
        while self.is_exposing:
            time.sleep(1)

        self.logger.debug(
            f'Starting exposure processing for {observation_event}')

        if compress_fits is None:
            compress_fits = self.get_config('observations.compress_fits',
                                            default=False)

        if make_pretty_images is None:
            make_pretty_images = self.get_config(
                'observations.make_pretty_images', default=False)

        # Resolve record_observations the same way; the docstring documents the
        # observations.record_observations config-server key for the None default.
        if record_observations is None:
            record_observations = self.get_config('observations.record_observations',
                                                  default=False)

        image_id = metadata['image_id']
        seq_id = metadata['sequence_id']
        file_path = metadata['file_path']
        exptime = metadata['exptime']
        field_name = metadata['field_name']

        # Make sure image exists.
        if not os.path.exists(file_path):
            observation_event.set()
            raise FileNotFoundError(
                f"Expected image at file_path={file_path!r} does not exist or "
                + "cannot be accessed, cannot process.")

        self.logger.debug(f'Starting FITS processing for {file_path}')
        file_path = self._process_fits(file_path, metadata)
        self.logger.debug(f'Finished FITS processing for {file_path}')

        # TODO make this async and take it out of camera.
        if make_pretty_images:
            try:
                image_title = f'{field_name} [{exptime}s] {seq_id}'

                self.logger.debug(
                    f"Making pretty image for file_path={file_path!r}")
                link_path = None
                if metadata['is_primary']:
                    # This should be in the config somewhere.
                    link_path = os.path.expandvars('$PANDIR/images/latest.jpg')

                img_utils.make_pretty_image(file_path,
                                            title=image_title,
                                            link_path=link_path)
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    f'Problem with extracting pretty image: {e!r}')

        metadata['exptime'] = get_quantity_value(metadata['exptime'],
                                                 unit='seconds')

        if record_observations:
            self.logger.debug(f"Adding current observation to db: {image_id}")
            self.db.insert_current('observations', metadata)

        if compress_fits:
            self.logger.debug(f'Compressing file_path={file_path!r}')
            compressed_file_path = fits_utils.fpack(file_path)
            self.logger.debug(f'Compressed {compressed_file_path}')

        # Mark the event as done
        observation_event.set()
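A hedged call sketch: `camera` stands for an instance of the class this method belongs to, and the metadata keys and values are placeholders matching what `process_exposure` reads above:

# Call sketch (hypothetical `camera` instance; placeholder metadata values).
import threading

observation_event = threading.Event()
metadata = {
    'image_id': 'example_image_0001',
    'sequence_id': 'example_sequence_0001',
    'file_path': '/var/panoptes/images/example.fits',  # must exist on disk
    'exptime': 120,                                     # seconds
    'field_name': 'Example Field',
    'is_primary': True,
}

camera.process_exposure(metadata,
                        observation_event,
                        compress_fits=True,
                        record_observations=True)
observation_event.wait()  # set by process_exposure() once processing finishes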