Example #1
File: bias.py Project: rachel3834/banzai
    def make_master_calibration_frame(self, images, image_config,
                                      logging_tags):

        bias_data = np.zeros((image_config.ny, image_config.nx, len(images)),
                             dtype=np.float32)
        bias_mask = np.zeros((image_config.ny, image_config.nx, len(images)),
                             dtype=np.uint8)
        bias_level_array = np.zeros(len(images), dtype=np.float32)

        master_bias_filename = self.get_calibration_filename(image_config)
        logs.add_tag(logging_tags, 'master_bias',
                     os.path.basename(master_bias_filename))
        for i, image in enumerate(images):
            bias_level_array[i] = stats.sigma_clipped_mean(image.data,
                                                           3.5,
                                                           mask=image.bpm)

            logs.add_tag(logging_tags, 'filename',
                         os.path.basename(image.filename))
            logs.add_tag(logging_tags, 'BIASLVL', float(bias_level_array[i]))
            self.logger.debug('Calculating bias level', extra=logging_tags)
            # Subtract the bias level for each image
            bias_data[:, :, i] = image.data[:, :] - bias_level_array[i]
            bias_mask[:, :, i] = image.bpm[:, :]

        mean_bias_level = stats.sigma_clipped_mean(bias_level_array, 3.0)

        master_bias = stats.sigma_clipped_mean(bias_data,
                                               3.0,
                                               axis=2,
                                               mask=bias_mask,
                                               inplace=True)

        del bias_data
        del bias_mask

        master_bpm = np.array(master_bias == 0.0, dtype=np.uint8)

        header = fits_utils.create_master_calibration_header(images)

        header['BIASLVL'] = (mean_bias_level, 'Mean bias level of master bias')
        master_bias_image = Image(self.pipeline_context,
                                  data=master_bias,
                                  header=header)
        master_bias_image.filename = master_bias_filename
        master_bias_image.bpm = master_bpm

        logs.pop_tag(logging_tags, 'master_bias')
        logs.add_tag(logging_tags, 'filename',
                     os.path.basename(master_bias_image.filename))
        logs.add_tag(logging_tags, 'BIASLVL', mean_bias_level)
        self.logger.debug('Average bias level in ADU', extra=logging_tags)

        return [master_bias_image]
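Every example on this page calls banzai's stats.sigma_clipped_mean, whose implementation is not shown. The following is only a minimal sketch of the behavior these call sites assume: an optional bad-pixel mask (nonzero means bad), clipping at sigma robust standard deviations around the median, a fill value for output pixels where every input was rejected, and an inplace flag (accepted here for API compatibility only; the real function presumably uses it to save memory).

import numpy as np


def sigma_clipped_mean(data, sigma, axis=None, mask=None, fill_value=0.0,
                       inplace=False):
    # Hypothetical stand-in for banzai's stats.sigma_clipped_mean
    data = np.asarray(data, dtype=np.float32)
    bad = np.zeros(data.shape, dtype=bool) if mask is None else mask > 0
    clipped = np.ma.masked_array(data, mask=bad)
    center = np.ma.median(clipped, axis=axis, keepdims=True)
    # Robust standard deviation via the median absolute deviation
    scale = 1.4826 * np.ma.median(np.abs(clipped - center), axis=axis,
                                  keepdims=True)
    clipped = np.ma.masked_where(np.abs(clipped - center) > sigma * scale,
                                 clipped)
    return np.ma.filled(np.ma.mean(clipped, axis=axis), fill_value)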
Example #2
File: dark.py Project: Fingel/banzai
    def make_master_calibration_frame(self, images, image_config, logging_tags):
        dark_data = np.zeros((images[0].ny, images[0].nx, len(images)), dtype=np.float32)
        dark_mask = np.zeros((images[0].ny, images[0].nx, len(images)), dtype=np.uint8)

        master_dark_filename = self.get_calibration_filename(images[0])

        logs.add_tag(logging_tags, 'master_dark', os.path.basename(master_dark_filename))
        for i, image in enumerate(images):
            logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))
            self.logger.debug('Combining dark', extra=logging_tags)

            dark_data[:, :, i] = image.data[:, :]
            dark_data[:, :, i] /= image.exptime
            dark_mask[:, :, i] = image.bpm[:, :]

        master_dark = stats.sigma_clipped_mean(dark_data, 3.0, axis=2, mask=dark_mask, inplace=True)

        # Memory cleanup
        del dark_data
        del dark_mask

        master_bpm = np.array(master_dark == 0.0, dtype=np.uint8)
        # Zero out flagged pixels using a boolean mask; indexing with the raw
        # uint8 array would be integer fancy indexing and corrupt rows 0 and 1
        master_dark[master_bpm > 0] = 0.0

        # Save the master dark image with all of the combined images in the header
        master_dark_header = fits_utils.create_master_calibration_header(images)
        master_dark_image = Image(self.pipeline_context, data=master_dark,
                                  header=master_dark_header)
        master_dark_image.filename = master_dark_filename
        master_dark_image.bpm = master_bpm

        logs.pop_tag(logging_tags, 'master_dark')
        logs.add_tag(logging_tags, 'filename', os.path.basename(master_dark_image.filename))
        self.logger.info('Created master dark', extra=logging_tags)
        return [master_dark_image]
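Note that each input dark is divided by its exposure time before stacking, so the master dark is stored in counts per second. The corresponding subtraction stage is not part of this section; a hypothetical sketch of how a master built this way would be applied to a science frame:

def subtract_dark(image, master_dark):
    # Rescale the counts-per-second master to this image's exposure time
    image.data -= master_dark.data * image.exptime
    return image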
Example #3
    def make_master_calibration_frame(self, images):
        # Sort the images by reverse observation date, so that the most recent one
        # is used to create the filename and select the day directory
        images.sort(key=lambda image: image.dateobs, reverse=True)

        data_stack = np.zeros((images[0].ny, images[0].nx, len(images)), dtype=np.float32)
        stack_mask = np.zeros((images[0].ny, images[0].nx, len(images)), dtype=np.uint8)

        make_calibration_name = settings.CALIBRATION_FILENAME_FUNCTIONS[self.calibration_type]

        master_calibration_filename = make_calibration_name(images[0])

        for i, image in enumerate(images):
            logger.debug('Stacking Frames', image=image,
                         extra_tags={'master_calibration': os.path.basename(master_calibration_filename)})
            data_stack[:, :, i] = image.data[:, :]
            stack_mask[:, :, i] = image.bpm[:, :]

        stacked_data = stats.sigma_clipped_mean(data_stack, 3.0, axis=2, mask=stack_mask, inplace=True)

        # Memory cleanup
        del data_stack
        del stack_mask

        master_bpm = np.array(stacked_data == 0.0, dtype=np.uint8)

        # Save the master calibration image with all of the combined images in the header
        master_header = create_master_calibration_header(images[0].header, images)
        master_image = FRAME_CLASS(self.runtime_context, data=stacked_data, header=master_header)
        master_image.filename = master_calibration_filename
        master_image.bpm = master_bpm

        logger.info('Created master calibration stack', image=master_image,
                    extra_tags={'calibration_type': self.calibration_type})
        return master_image
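settings.CALIBRATION_FILENAME_FUNCTIONS is not defined in this section; from its use above it is a mapping from calibration type to a function that builds a filename from the most recent image. A purely hypothetical illustration of its shape:

CALIBRATION_FILENAME_FUNCTIONS = {
    'BIAS': lambda image: 'bias_{0}.fits'.format(image.dateobs.strftime('%Y%m%d')),
    'DARK': lambda image: 'dark_{0}.fits'.format(image.dateobs.strftime('%Y%m%d')),
}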
Example #4
def calculate_rv(image, orders_to_use, template):
    # Coarse search: 1 km/s steps from -2000 to +2000 km/s
    coarse_velocities = np.arange(-2000, 2001, 1) * units.km / units.s

    coarse_ccfs = cross_correlate_over_traces(image, orders_to_use,
                                              coarse_velocities, template)

    # Take the velocity at the peak of each order's cross-correlation function
    velocity_peaks = np.array([
        coarse_velocities[np.argmax(ccf['xcor'])].to('km / s').value
        for ccf in coarse_ccfs
    ])
    best_v = stats.sigma_clipped_mean(velocity_peaks, 3.0)
    # Fine search: 1 m/s steps within +/- 2 km/s of the coarse peak
    velocities = np.arange(best_v - 2, best_v + 2 + 1e-4,
                           1e-3) * units.km / units.s

    ccfs = cross_correlate_over_traces(image, orders_to_use, velocities,
                                       template)
    # Calculate the peak velocity
    rvs_per_order = np.array(
        [ccf['v'][np.argmax(ccf['xcor'])].to('km / s').value for ccf in ccfs])
    # Iteratively sigma clip, centering on the median and using
    # robust_standard_deviation to reject outliers.
    rvs_per_order = sigma_clip(rvs_per_order,
                               sigma=4,
                               cenfunc='median',
                               maxiters=5,
                               stdfunc=stats.robust_standard_deviation,
                               axis=None,
                               masked=True,
                               return_bounds=False,
                               copy=True)
    rv = np.ma.mean(rvs_per_order) * units.km / units.s
    rv_err = np.ma.std(rvs_per_order) / np.sqrt(
        np.ma.count(rvs_per_order)) * units.km / units.s
    return rv, rv_err, coarse_ccfs, ccfs
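The astropy sigma_clip call above returns a masked array, which is why the final mean and standard error use np.ma functions. A tiny standalone demonstration of the same pattern, with astropy's built-in 'mad_std' standing in for banzai's stats.robust_standard_deviation and made-up per-order velocities:

import numpy as np
from astropy.stats import sigma_clip

# Per-order velocities (km/s) with one obvious outlier
rvs_per_order = np.array([12.1, 11.9, 12.0, 12.2, 45.0, 11.8])
clipped = sigma_clip(rvs_per_order, sigma=4, cenfunc='median',
                     maxiters=5, stdfunc='mad_std', masked=True)
rv = np.ma.mean(clipped)  # 45.0 is rejected, so this is ~12.0
rv_err = np.ma.std(clipped) / np.sqrt(np.ma.count(clipped))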
Example #5
File: flats.py Project: LCOGT/banzai
 def do_stage(self, image):
     # Get the sigma clipped mean of the central 25% of the image
     flat_normalization = stats.sigma_clipped_mean(image.get_inner_image_section(), 3.5)
     image.data /= flat_normalization
     image.header['FLATLVL'] = flat_normalization
     logger.info('Calculate flat normalization', image=image,
                 extra_tags={'flat_normalization': flat_normalization})
     return image
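image.get_inner_image_section() is not defined in this section, but Example #6 below performs the same central-25% selection inline, which suggests a (hypothetical) equivalent:

def get_inner_image_section(image):
    # Central half of each axis, i.e. the central 25% of the pixels
    quarter_ny, quarter_nx = image.ny // 4, image.nx // 4
    return image.data[quarter_ny:-quarter_ny, quarter_nx:-quarter_nx]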
Example #6
File: flats.py Project: rachel3834/banzai
    def make_master_calibration_frame(self, images, image_config, logging_tags):
        flat_data = np.zeros((images[0].ny, images[0].nx, len(images)), dtype=np.float32)
        flat_mask = np.zeros((images[0].ny, images[0].nx, len(images)), dtype=np.uint8)

        quarter_nx = images[0].nx // 4
        quarter_ny = images[0].ny // 4

        master_flat_filename = self.get_calibration_filename(images[0])
        logs.add_tag(logging_tags, 'master_flat', os.path.basename(master_flat_filename))
        for i, image in enumerate(images):

            # Get the sigma clipped mean of the central 25% of the image
            flat_normalization = stats.sigma_clipped_mean(image.data[quarter_ny: -quarter_ny,
                                                                     quarter_nx:-quarter_nx], 3.5)
            flat_data[:, :, i] = image.data[:, :]
            flat_data[:, :, i] /= flat_normalization
            flat_mask[:, :, i] = image.bpm[:, :]
            logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))
            logs.add_tag(logging_tags, 'flat_normalization', flat_normalization)
            self.logger.debug('Calculating flat normalization', extra=logging_tags)

        logs.pop_tag(logging_tags, 'flat_normalization')
        master_flat = stats.sigma_clipped_mean(flat_data, 3.0, axis=2, mask=flat_mask,
                                               fill_value=1.0, inplace=True)

        master_bpm = np.array(master_flat == 1.0, dtype=np.uint8)

        # Flag both filled pixels (== 1.0) and very low-signal pixels; an AND
        # of these two conditions would always be False, so this must be an OR
        master_bpm = np.logical_or(master_bpm, master_flat < 0.2)

        master_flat[master_flat < 0.2] = 1.0

        master_flat_header = fits_utils.create_master_calibration_header(images)

        master_flat_image = Image(self.pipeline_context, data=master_flat,
                                  header=master_flat_header)
        master_flat_image.filename = master_flat_filename
        master_flat_image.bpm = master_bpm

        logs.pop_tag(logging_tags, 'master_flat')
        logs.add_tag(logging_tags, 'filename', os.path.basename(master_flat_image.filename))
        self.logger.info('Created master flat', extra=logging_tags)

        return [master_flat_image]
Example #7
File: flats.py Project: baulml/banzai
 def do_stage(self, image):
     # Get the sigma clipped mean of the central 25% of the image
     flat_normalization = stats.sigma_clipped_mean(
         image.get_inner_image_section(), 3.5)
     image.data /= flat_normalization
     image.header['FLATLVL'] = flat_normalization
     logger.info('Calculate flat normalization',
                 image=image,
                 extra_tags={'flat_normalization': flat_normalization})
     return image
Example #8
def test_makes_a_sensible_master_dark(mock_frame):
    nimages = 20
    images = [FakeDarkImage() for x in range(nimages)]
    for i, image in enumerate(images):
        image.data = np.ones((image.ny, image.nx)) * i

    expected_master_dark = stats.sigma_clipped_mean(np.arange(nimages), 3.0)

    maker = DarkMaker(FakeContext(frame_class=FakeDarkImage))
    stacked_images = maker.do_stage(images)
    assert (stacked_images[0].data == expected_master_dark).all()
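The assertion holds because every pixel of image i has value i, so each output pixel is the sigma-clipped mean of 0..19: the median is 9.5, the MAD-based robust sigma is roughly 7.4, nothing lies beyond 3 sigma, and the clipped mean reduces to the plain mean. Checking with the sigma_clipped_mean sketch after Example #1:

import numpy as np
print(sigma_clipped_mean(np.arange(20), 3.0))  # -> 9.5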
Example #9
File: bias.py Project: nvolgenau/banzai
def _subtract_overscan_2d(image):
    overscan_region = fits_utils.parse_region_keyword(image.header.get('BIASSEC'))
    if overscan_region is not None:
        overscan_level = stats.sigma_clipped_mean(image.data[overscan_region], 3)
        image.header['L1STATOV'] = (1, 'Status flag for overscan correction')
    else:
        overscan_level = 0.0
        image.header['L1STATOV'] = (0, 'Status flag for overscan correction')

    image.header['OVERSCAN'] = (overscan_level, 'Overscan value that was subtracted')
    image.data -= overscan_level
    return overscan_level
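fits_utils.parse_region_keyword is not shown; it evidently turns a FITS section string such as '[2055:2080,1:2048]' into a numpy index and returns None when the keyword is missing. A plausible sketch covering only the simple form (FITS sections are 1-indexed, inclusive, and x-first, so the axes are swapped and shifted to 0-indexed, half-open slices):

import re


def parse_region_keyword(keyword_value):
    # Hypothetical equivalent of fits_utils.parse_region_keyword
    if not keyword_value:
        return None
    match = re.match(r'\[(\d+):(\d+),(\d+):(\d+)\]', keyword_value.strip())
    if match is None:
        return None
    x1, x2, y1, y2 = (int(value) for value in match.groups())
    return slice(y1 - 1, y2), slice(x1 - 1, x2)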
Example #10
File: bias.py Project: baulml/banzai
def _subtract_overscan_2d(image):
    overscan_region = fits_utils.parse_region_keyword(image.header.get('BIASSEC'))
    if overscan_region is not None:
        overscan_level = stats.sigma_clipped_mean(image.data[overscan_region], 3)
        image.header['L1STATOV'] = (1, 'Status flag for overscan correction')
    else:
        overscan_level = 0.0
        image.header['L1STATOV'] = (0, 'Status flag for overscan correction')

    image.header['OVERSCAN'] = (overscan_level, 'Overscan value that was subtracted')
    image.data -= overscan_level
    return overscan_level
Example #11
def test_makes_a_sensible_master_dark(mock_frame, mock_namer):
    mock_namer.return_value = lambda *x: 'foo.fits'
    nimages = 20
    images = [FakeDarkImage() for x in range(nimages)]
    for i, image in enumerate(images):
        image.data = np.ones((image.ny, image.nx)) * i

    expected_master_dark = stats.sigma_clipped_mean(np.arange(nimages), 3.0)

    maker = DarkMaker(FakeContext(frame_class=FakeDarkImage))
    stacked_images = maker.do_stage(images)
    assert (stacked_images[0].data == expected_master_dark).all()
Example #12
File: bias.py Project: nvolgenau/banzai
    def make_master_calibration_frame(self, images, image_config, logging_tags):

        bias_data = np.zeros((image_config.ny, image_config.nx, len(images)), dtype=np.float32)
        bias_mask = np.zeros((image_config.ny, image_config.nx, len(images)), dtype=np.uint8)
        bias_level_array = np.zeros(len(images), dtype=np.float32)

        master_bias_filename = self.get_calibration_filename(image_config)
        logs.add_tag(logging_tags, 'master_bias', os.path.basename(master_bias_filename))
        for i, image in enumerate(images):
            bias_level_array[i] = stats.sigma_clipped_mean(image.data, 3.5, mask=image.bpm)

            logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))
            logs.add_tag(logging_tags, 'BIASLVL', float(bias_level_array[i]))
            self.logger.debug('Calculating bias level', extra=logging_tags)
            # Subtract the bias level for each image
            bias_data[:, :, i] = image.data[:, :] - bias_level_array[i]
            bias_mask[:, :, i] = image.bpm[:, :]

        mean_bias_level = stats.sigma_clipped_mean(bias_level_array, 3.0)

        master_bias = stats.sigma_clipped_mean(bias_data, 3.0, axis=2, mask=bias_mask, inplace=True)

        del bias_data
        del bias_mask

        master_bpm = np.array(master_bias == 0.0, dtype=np.uint8)

        header = fits_utils.create_master_calibration_header(images)

        header['BIASLVL'] = (mean_bias_level, 'Mean bias level of master bias')
        master_bias_image = Image(self.pipeline_context, data=master_bias, header=header)
        master_bias_image.filename = master_bias_filename
        master_bias_image.bpm = master_bpm

        logs.pop_tag(logging_tags, 'master_bias')
        logs.add_tag(logging_tags, 'filename', os.path.basename(master_bias_image.filename))
        logs.add_tag(logging_tags, 'BIASLVL', mean_bias_level)
        self.logger.debug('Average bias level in ADU', extra=logging_tags)

        return [master_bias_image]
Example #13
    def make_master_calibration_frame(self, images):
        # Sort the images by reverse observation date, so that the most recent one
        # is used to create the filename and select the day directory
        images.sort(key=lambda image: image.dateobs, reverse=True)

        data_stack = np.zeros((images[0].ny, images[0].nx, len(images)),
                              dtype=np.float32)
        stack_mask = np.zeros((images[0].ny, images[0].nx, len(images)),
                              dtype=np.uint8)

        make_calibration_name = file_utils.make_calibration_filename_function(
            self.calibration_type, self.runtime_context)

        master_calibration_filename = make_calibration_name(images[0])

        for i, image in enumerate(images):
            logger.debug('Stacking Frames',
                         image=image,
                         extra_tags={
                             'master_calibration':
                             os.path.basename(master_calibration_filename)
                         })
            data_stack[:, :, i] = image.data[:, :]
            stack_mask[:, :, i] = image.bpm[:, :]

        stacked_data = stats.sigma_clipped_mean(data_stack,
                                                3.0,
                                                axis=2,
                                                mask=stack_mask,
                                                inplace=True)

        # Memory cleanup
        del data_stack
        del stack_mask

        master_bpm = np.array(stacked_data == 0.0, dtype=np.uint8)

        # Save the master calibration image with all of the combined images in the header
        master_header = create_master_calibration_header(
            images[0].header, images)
        master_image = FRAME_CLASS(self.runtime_context,
                                   data=stacked_data,
                                   header=master_header)
        master_image.filename = master_calibration_filename
        master_image.bpm = master_bpm

        logger.info('Created master calibration stack',
                    image=master_image,
                    extra_tags={'calibration_type': self.calibration_type})
        return master_image
Example #14
File: bias.py Project: nvolgenau/banzai
def _subtract_overscan_3d(image, i):
    overscan_region = fits_utils.parse_region_keyword(image.extension_headers[i].get('BIASSEC'))
    if overscan_region is not None:
        overscan_level = stats.sigma_clipped_mean(image.data[i][overscan_region], 3)
        image.header['L1STATOV'] = (1, 'Status flag for overscan correction')
    else:
        overscan_level = 0.0
        image.header['L1STATOV'] = (0, 'Status flag for overscan correction')

    overscan_comment = 'Overscan value that was subtracted from Q{0}'.format(i + 1)
    image.header['OVERSCN{0}'.format(i + 1)] = (overscan_level, overscan_comment)

    image.data[i] -= overscan_level
    return overscan_level
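Unlike the 2D version above, this variant takes a quadrant index i, reads BIASSEC from that extension's header, and records a per-quadrant OVERSCN keyword. A hypothetical driver over a four-quadrant frame:

for quadrant in range(4):
    _subtract_overscan_3d(image, quadrant)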
Example #15
File: bias.py Project: baulml/banzai
def _subtract_overscan_3d(image, i):
    overscan_region = fits_utils.parse_region_keyword(image.extension_headers[i].get('BIASSEC'))
    if overscan_region is not None:
        overscan_level = stats.sigma_clipped_mean(image.data[i][overscan_region], 3)
        image.header['L1STATOV'] = (1, 'Status flag for overscan correction')
    else:
        overscan_level = 0.0
        image.header['L1STATOV'] = (0, 'Status flag for overscan correction')

    overscan_comment = 'Overscan value that was subtracted from Q{0}'.format(i + 1)
    image.header['OVERSCN{0}'.format(i + 1)] = (overscan_level, overscan_comment)

    image.data[i] -= overscan_level
    return overscan_level
Example #16
    def make_master_calibration_frame(self, images, image_config,
                                      logging_tags):
        dark_data = np.zeros((images[0].ny, images[0].nx, len(images)),
                             dtype=np.float32)
        dark_mask = np.zeros((images[0].ny, images[0].nx, len(images)),
                             dtype=np.uint8)

        master_dark_filename = self.get_calibration_filename(images[0])

        logs.add_tag(logging_tags, 'master_dark',
                     os.path.basename(master_dark_filename))
        for i, image in enumerate(images):
            logs.add_tag(logging_tags, 'filename',
                         os.path.basename(image.filename))
            self.logger.debug('Combining dark', extra=logging_tags)

            dark_data[:, :, i] = image.data[:, :]
            dark_data[:, :, i] /= image.exptime
            dark_mask[:, :, i] = image.bpm[:, :]

        master_dark = stats.sigma_clipped_mean(dark_data,
                                               3.0,
                                               axis=2,
                                               mask=dark_mask,
                                               inplace=True)

        # Memory cleanup
        del dark_data
        del dark_mask

        master_bpm = np.array(master_dark == 0.0, dtype=np.uint8)
        # Zero out flagged pixels using a boolean mask; indexing with the raw
        # uint8 array would be integer fancy indexing and corrupt rows 0 and 1
        master_dark[master_bpm > 0] = 0.0

        # Save the master dark image with all of the combined images in the header
        master_dark_header = fits_utils.create_master_calibration_header(
            images)
        master_dark_image = Image(self.pipeline_context,
                                  data=master_dark,
                                  header=master_dark_header)
        master_dark_image.filename = master_dark_filename
        master_dark_image.bpm = master_bpm

        logs.pop_tag(logging_tags, 'master_dark')
        logs.add_tag(logging_tags, 'filename',
                     os.path.basename(master_dark_image.filename))
        self.logger.info('Created master dark', extra=logging_tags)
        return [master_dark_image]
Example #17
File: photometry.py Project: dg7541/banzai
    def do_stage(self, images):
        for i, image in enumerate(images):
            try:
                # Set the number of source pixels to be 5% of the total. This keeps us safe from
                # satellites and airplanes.
                sep.set_extract_pixstack(int(image.nx * image.ny * 0.05))

                data = image.data.copy()
                error = (np.abs(data) + image.readnoise ** 2.0) ** 0.5
                mask = image.bpm > 0

                # FITS data can be in backwards byte order, so fix that if need be and subtract
                # the background
                try:
                    bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
                except ValueError:
                    data = data.byteswap(True).newbyteorder()
                    bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
                bkg.subfrom(data)

                # Do an initial source detection
                # TODO: Add back in masking after we are sure SEP works
                sources = sep.extract(data, self.threshold, minarea=self.min_area,
                                      err=error, deblend_cont=0.005)

                # Convert the detections into a table
                sources = Table(sources)

                # Calculate the ellipticity
                sources['ellipticity'] = 1.0 - (sources['b'] / sources['a'])

                # Fix any values of theta that are invalid due to floating point rounding
                # -pi / 2 < theta < pi / 2
                sources['theta'][sources['theta'] > (np.pi / 2.0)] -= np.pi
                sources['theta'][sources['theta'] < (-np.pi / 2.0)] += np.pi

                # Calculate the kron radius
                kronrad, krflag = sep.kron_radius(data, sources['x'], sources['y'],
                                                  sources['a'], sources['b'],
                                                  sources['theta'], 6.0)
                sources['flag'] |= krflag
                sources['kronrad'] = kronrad

                # Calculate the equivalent of flux_auto
                flux, fluxerr, flag = sep.sum_ellipse(data, sources['x'], sources['y'],
                                                      sources['a'], sources['b'],
                                                      np.pi / 2.0, 2.5 * kronrad,
                                                      subpix=1, err=error)
                sources['flux'] = flux
                sources['fluxerr'] = fluxerr
                sources['flag'] |= flag

                # Calculate the FWHMs of the stars:
                fwhm = 2.0 * (np.log(2) * (sources['a'] ** 2.0 + sources['b'] ** 2.0)) ** 0.5
                sources['fwhm'] = fwhm

                # Cut individual bright pixels. Often cosmic rays
                sources = sources[fwhm > 1.0]

                # Measure the flux profile
                flux_radii, flag = sep.flux_radius(data, sources['x'], sources['y'],
                                                   6.0 * sources['a'], [0.25, 0.5, 0.75],
                                                   normflux=sources['flux'], subpix=5)
                sources['flag'] |= flag
                sources['fluxrad25'] = flux_radii[:, 0]
                sources['fluxrad50'] = flux_radii[:, 1]
                sources['fluxrad75'] = flux_radii[:, 2]

                # Calculate the windowed positions
                sig = 2.0 / 2.35 * sources['fluxrad50']
                xwin, ywin, flag = sep.winpos(data, sources['x'], sources['y'], sig)
                sources['flag'] |= flag
                sources['xwin'] = xwin
                sources['ywin'] = ywin

                # Calculate the average background at each source
                bkgflux, fluxerr, flag = sep.sum_ellipse(bkg.back(), sources['x'], sources['y'],
                                                         sources['a'], sources['b'], np.pi / 2.0,
                                                         2.5 * sources['kronrad'], subpix=1)
                #masksum, fluxerr, flag = sep.sum_ellipse(mask, sources['x'], sources['y'],
                #                                         sources['a'], sources['b'], np.pi / 2.0,
                #                                         2.5 * kronrad, subpix=1)

                background_area = (2.5 * sources['kronrad']) ** 2.0 * sources['a'] * sources['b'] * np.pi # - masksum
                sources['background'] = bkgflux
                sources['background'][background_area > 0] /= background_area[background_area > 0]
                # Update the catalog to match fits convention instead of python array convention
                sources['x'] += 1.0
                sources['y'] += 1.0

                sources['xpeak'] += 1
                sources['ypeak'] += 1

                sources['xwin'] += 1.0
                sources['ywin'] += 1.0

                sources['theta'] = np.degrees(sources['theta'])

                image.catalog = sources['x', 'y', 'xwin', 'ywin', 'xpeak', 'ypeak',
                                        'flux', 'fluxerr', 'background', 'fwhm',
                                        'a', 'b', 'theta', 'kronrad', 'ellipticity',
                                        'fluxrad25', 'fluxrad50', 'fluxrad75',
                                        'x2', 'y2', 'xy', 'flag']

                # Add the units and description to the catalogs
                image.catalog['x'].unit = 'pixel'
                image.catalog['x'].description = 'X coordinate of the object'
                image.catalog['y'].unit = 'pixel'
                image.catalog['y'].description = 'Y coordinate of the object'
                image.catalog['xwin'].unit = 'pixel'
                image.catalog['xwin'].description = 'Windowed X coordinate of the object'
                image.catalog['ywin'].unit = 'pixel'
                image.catalog['ywin'].description = 'Windowed Y coordinate of the object'
                image.catalog['xpeak'].unit = 'pixel'
                image.catalog['xpeak'].description = 'X coordinate of the peak'
                image.catalog['ypeak'].unit = 'pixel'
                image.catalog['ypeak'].description = 'Y coordinate of the peak'
                image.catalog['flux'].unit = 'counts'
                image.catalog['flux'].description = 'Flux within a Kron-like elliptical aperture'
                image.catalog['fluxerr'].unit = 'counts'
                image.catalog['fluxerr'].description = 'Error on the flux within a Kron-like elliptical aperture'
                image.catalog['background'].unit = 'counts'
                image.catalog['background'].description = 'Average background value in the aperture'
                image.catalog['fwhm'].unit = 'pixel'
                image.catalog['fwhm'].description = 'FWHM of the object'
                image.catalog['a'].unit = 'pixel'
                image.catalog['a'].description = 'Semi-major axis of the object'
                image.catalog['b'].unit = 'pixel'
                image.catalog['b'].description = 'Semi-minor axis of the object'
                image.catalog['theta'].unit = 'degrees'
                image.catalog['theta'].description = 'Position angle of the object'
                image.catalog['kronrad'].unit = 'pixel'
                image.catalog['kronrad'].description = 'Kron radius used for extraction'
                image.catalog['ellipticity'].description = 'Ellipticity'
                image.catalog['fluxrad25'].unit = 'pixel'
                image.catalog['fluxrad25'].description = 'Radius containing 25% of the flux'
                image.catalog['fluxrad50'].unit = 'pixel'
                image.catalog['fluxrad50'].description = 'Radius containing 50% of the flux'
                image.catalog['fluxrad75'].unit = 'pixel'
                image.catalog['fluxrad75'].description = 'Radius containing 75% of the flux'
                image.catalog['x2'].unit = 'pixel^2'
                image.catalog['x2'].description = 'Variance on X coordinate of the object'
                image.catalog['y2'].unit = 'pixel^2'
                image.catalog['y2'].description = 'Variance on Y coordinate of the object'
                image.catalog['xy'].unit = 'pixel^2'
                image.catalog['xy'].description = 'XY covariance of the object'
                image.catalog['flag'].description = 'Bit mask combination of extraction and photometry flags'

                image.catalog.sort('flux')
                image.catalog.reverse()

                logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
                logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))

                # Save some background statistics in the header
                mean_background = stats.sigma_clipped_mean(bkg.back(), 5.0)
                image.header['L1MEAN'] = (mean_background,
                                          '[counts] Sigma clipped mean of frame background')
                logs.add_tag(logging_tags, 'L1MEAN', float(mean_background))

                median_background = np.median(bkg.back())
                image.header['L1MEDIAN'] = (median_background,
                                            '[counts] Median of frame background')
                logs.add_tag(logging_tags, 'L1MEDIAN', float(median_background))

                std_background = stats.robust_standard_deviation(bkg.back())
                image.header['L1SIGMA'] = (std_background,
                                           '[counts] Robust std dev of frame background')
                logs.add_tag(logging_tags, 'L1SIGMA', float(std_background))

                # Save some image statistics to the header
                good_objects = image.catalog['flag'] == 0

                seeing = np.median(image.catalog['fwhm'][good_objects]) * image.pixel_scale
                image.header['L1FWHM'] = (seeing, '[arcsec] Frame FWHM in arcsec')
                logs.add_tag(logging_tags, 'L1FWHM', float(seeing))

                mean_ellipticity = stats.sigma_clipped_mean(sources['ellipticity'][good_objects],
                                                            3.0)
                image.header['L1ELLIP'] = (mean_ellipticity, 'Mean image ellipticity (1-B/A)')
                logs.add_tag(logging_tags, 'L1ELLIP', float(mean_ellipticity))

                mean_position_angle = stats.sigma_clipped_mean(sources['theta'][good_objects], 3.0)
                image.header['L1ELLIPA'] = (mean_position_angle,
                                            '[deg] PA of mean image ellipticity')
                logs.add_tag(logging_tags, 'L1ELLIPA', float(mean_position_angle))

                self.logger.info('Extracted sources', extra=logging_tags)

            except Exception as e:
                logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
                logs.add_tag(logging_tags, 'filename', os.path.basename(image.filename))
                self.logger.error(e, extra=logging_tags)
        return images
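Two numeric conventions in this stage deserve a note. The per-pixel uncertainty combines Poisson variance (approximately the counts, assuming a gain near 1 e-/ADU) with read-noise variance in quadrature, and the 2.0 / 2.35 factor converts the half-flux radius into a Gaussian sigma: for a circular Gaussian the radius enclosing 50% of the flux is exactly half the FWHM, and FWHM = 2 * sqrt(2 * ln 2) * sigma, which is about 2.35 * sigma.

import numpy as np

def pixel_uncertainty(counts, readnoise):
    # Poisson variance + read-noise variance, assuming gain ~ 1 e-/ADU
    return np.sqrt(np.abs(counts) + readnoise ** 2.0)

def gaussian_sigma(fluxrad50):
    # The half-flux radius is FWHM / 2 for a Gaussian, and sigma = FWHM / 2.35
    return 2.0 / 2.35 * fluxrad50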
Example #18
File: bias.py Project: baulml/banzai
 def do_stage(self, image):
     bias_level = stats.sigma_clipped_mean(image.data, 3.5, mask=image.bpm)
     logger.debug('Subtracting bias level', image=image, extra_tags={'BIASLVL': float(bias_level)})
     image.data -= bias_level
     image.header['BIASLVL'] = (bias_level, 'Bias level that was removed')
     return image
Example #19
    def do_stage(self, image):
        try:
            # Set the number of source pixels to be 5% of the total. This keeps us safe from
            # satellites and airplanes.
            sep.set_extract_pixstack(int(image.nx * image.ny * 0.05))

            data = image.data.copy()
            error = (np.abs(data) + image.readnoise**2.0)**0.5
            mask = image.bpm > 0

            # FITS data can be in backwards byte order, so fix that if need be and subtract
            # the background
            try:
                bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
            except ValueError:
                data = data.byteswap(True).newbyteorder()
                bkg = sep.Background(data, mask=mask, bw=32, bh=32, fw=3, fh=3)
            bkg.subfrom(data)

            # Do an initial source detection
            # TODO: Add back in masking after we are sure SEP works
            sources = sep.extract(data,
                                  self.threshold,
                                  minarea=self.min_area,
                                  err=error,
                                  deblend_cont=0.005)

            # Convert the detections into a table
            sources = Table(sources)

            # We remove anything with a detection flag >= 8
            # This includes memory overflows and objects that are too close to the edge
            sources = sources[sources['flag'] < 8]

            sources = array_utils.prune_nans_from_table(sources)

            # Calculate the ellipticity
            sources['ellipticity'] = 1.0 - (sources['b'] / sources['a'])

            # Fix any values of theta that are invalid due to floating point rounding
            # -pi / 2 < theta < pi / 2
            sources['theta'][sources['theta'] > (np.pi / 2.0)] -= np.pi
            sources['theta'][sources['theta'] < (-np.pi / 2.0)] += np.pi

            # Calculate the kron radius
            kronrad, krflag = sep.kron_radius(data, sources['x'], sources['y'],
                                              sources['a'], sources['b'],
                                              sources['theta'], 6.0)
            sources['flag'] |= krflag
            sources['kronrad'] = kronrad

            # Calculate the equivalent of flux_auto
            flux, fluxerr, flag = sep.sum_ellipse(data,
                                                  sources['x'],
                                                  sources['y'],
                                                  sources['a'],
                                                  sources['b'],
                                                  np.pi / 2.0,
                                                  2.5 * kronrad,
                                                  subpix=1,
                                                  err=error)
            sources['flux'] = flux
            sources['fluxerr'] = fluxerr
            sources['flag'] |= flag

            # Do circular aperture photometry for diameters of 1" to 6"
            for diameter in [1, 2, 3, 4, 5, 6]:
                flux, fluxerr, flag = sep.sum_circle(data,
                                                     sources['x'],
                                                     sources['y'],
                                                     diameter / 2.0 /
                                                     image.pixel_scale,
                                                     gain=1.0,
                                                     err=error)
                sources['fluxaper{0}'.format(diameter)] = flux
                sources['fluxerr{0}'.format(diameter)] = fluxerr
                sources['flag'] |= flag

            # Calculate the FWHMs of the stars:
            fwhm = 2.0 * (np.log(2) *
                          (sources['a']**2.0 + sources['b']**2.0))**0.5
            sources['fwhm'] = fwhm

            # Cut individual bright pixels. Often cosmic rays
            sources = sources[fwhm > 1.0]

            # Measure the flux profile
            flux_radii, flag = sep.flux_radius(data,
                                               sources['x'],
                                               sources['y'],
                                               6.0 * sources['a'],
                                               [0.25, 0.5, 0.75],
                                               normflux=sources['flux'],
                                               subpix=5)
            sources['flag'] |= flag
            sources['fluxrad25'] = flux_radii[:, 0]
            sources['fluxrad50'] = flux_radii[:, 1]
            sources['fluxrad75'] = flux_radii[:, 2]

            # Calculate the windowed positions
            sig = 2.0 / 2.35 * sources['fluxrad50']
            xwin, ywin, flag = sep.winpos(data, sources['x'], sources['y'],
                                          sig)
            sources['flag'] |= flag
            sources['xwin'] = xwin
            sources['ywin'] = ywin

            # Calculate the average background at each source
            bkgflux, fluxerr, flag = sep.sum_ellipse(bkg.back(),
                                                     sources['x'],
                                                     sources['y'],
                                                     sources['a'],
                                                     sources['b'],
                                                     np.pi / 2.0,
                                                     2.5 * sources['kronrad'],
                                                     subpix=1)
            # masksum, fluxerr, flag = sep.sum_ellipse(mask, sources['x'], sources['y'],
            #                                         sources['a'], sources['b'], np.pi / 2.0,
            #                                         2.5 * kronrad, subpix=1)

            background_area = (
                2.5 * sources['kronrad']
            )**2.0 * sources['a'] * sources['b'] * np.pi  # - masksum
            sources['background'] = bkgflux
            sources['background'][background_area > 0] /= background_area[
                background_area > 0]
            # Update the catalog to match fits convention instead of python array convention
            sources['x'] += 1.0
            sources['y'] += 1.0

            sources['xpeak'] += 1
            sources['ypeak'] += 1

            sources['xwin'] += 1.0
            sources['ywin'] += 1.0

            sources['theta'] = np.degrees(sources['theta'])

            catalog = sources['x', 'y', 'xwin', 'ywin', 'xpeak', 'ypeak',
                              'flux', 'fluxerr', 'peak', 'fluxaper1',
                              'fluxerr1', 'fluxaper2', 'fluxerr2', 'fluxaper3',
                              'fluxerr3', 'fluxaper4', 'fluxerr4', 'fluxaper5',
                              'fluxerr5', 'fluxaper6', 'fluxerr6',
                              'background', 'fwhm', 'a', 'b', 'theta',
                              'kronrad', 'ellipticity', 'fluxrad25',
                              'fluxrad50', 'fluxrad75', 'x2', 'y2', 'xy',
                              'flag']

            # Add the units and description to the catalogs
            catalog['x'].unit = 'pixel'
            catalog['x'].description = 'X coordinate of the object'
            catalog['y'].unit = 'pixel'
            catalog['y'].description = 'Y coordinate of the object'
            catalog['xwin'].unit = 'pixel'
            catalog['xwin'].description = 'Windowed X coordinate of the object'
            catalog['ywin'].unit = 'pixel'
            catalog['ywin'].description = 'Windowed Y coordinate of the object'
            catalog['xpeak'].unit = 'pixel'
            catalog['xpeak'].description = 'X coordinate of the peak'
            catalog['ypeak'].unit = 'pixel'
            catalog['ypeak'].description = 'Y coordinate of the peak'
            catalog['flux'].unit = 'count'
            catalog[
                'flux'].description = 'Flux within a Kron-like elliptical aperture'
            catalog['fluxerr'].unit = 'count'
            catalog[
                'fluxerr'].description = 'Error on the flux within Kron aperture'
            catalog['peak'].unit = 'count'
            catalog['peak'].description = 'Peak flux (flux at xpeak, ypeak)'
            for diameter in [1, 2, 3, 4, 5, 6]:
                catalog['fluxaper{0}'.format(diameter)].unit = 'count'
                catalog['fluxaper{0}'.format(
                    diameter
                )].description = 'Flux from fixed circular aperture: {0}" diameter'.format(
                    diameter)
                catalog['fluxerr{0}'.format(diameter)].unit = 'count'
                catalog['fluxerr{0}'.format(
                    diameter
                )].description = 'Error on Flux from circular aperture: {0}"'.format(
                    diameter)

            catalog['background'].unit = 'count'
            catalog[
                'background'].description = 'Average background value in the aperture'
            catalog['fwhm'].unit = 'pixel'
            catalog['fwhm'].description = 'FWHM of the object'
            catalog['a'].unit = 'pixel'
            catalog['a'].description = 'Semi-major axis of the object'
            catalog['b'].unit = 'pixel'
            catalog['b'].description = 'Semi-minor axis of the object'
            catalog['theta'].unit = 'degree'
            catalog['theta'].description = 'Position angle of the object'
            catalog['kronrad'].unit = 'pixel'
            catalog['kronrad'].description = 'Kron radius used for extraction'
            catalog['ellipticity'].description = 'Ellipticity'
            catalog['fluxrad25'].unit = 'pixel'
            catalog[
                'fluxrad25'].description = 'Radius containing 25% of the flux'
            catalog['fluxrad50'].unit = 'pixel'
            catalog[
                'fluxrad50'].description = 'Radius containing 50% of the flux'
            catalog['fluxrad75'].unit = 'pixel'
            catalog[
                'fluxrad75'].description = 'Radius containing 75% of the flux'
            catalog['x2'].unit = 'pixel^2'
            catalog[
                'x2'].description = 'Variance on X coordinate of the object'
            catalog['y2'].unit = 'pixel^2'
            catalog[
                'y2'].description = 'Variance on Y coordinate of the object'
            catalog['xy'].unit = 'pixel^2'
            catalog['xy'].description = 'XY covariance of the object'
            catalog[
                'flag'].description = 'Bit mask of extraction/photometry flags'

            catalog.sort('flux')
            catalog.reverse()

            # Save some background statistics in the header
            mean_background = stats.sigma_clipped_mean(bkg.back(), 5.0)
            image.header['L1MEAN'] = (
                mean_background,
                '[counts] Sigma clipped mean of frame background')

            median_background = np.median(bkg.back())
            image.header['L1MEDIAN'] = (median_background,
                                        '[counts] Median of frame background')

            std_background = stats.robust_standard_deviation(bkg.back())
            image.header['L1SIGMA'] = (
                std_background, '[counts] Robust std dev of frame background')

            # Save some image statistics to the header
            good_objects = catalog['flag'] == 0
            for quantity in ['fwhm', 'ellipticity', 'theta']:
                good_objects = np.logical_and(
                    good_objects, np.logical_not(np.isnan(catalog[quantity])))
            if good_objects.sum() == 0:
                image.header['L1FWHM'] = ('NaN',
                                          '[arcsec] Frame FWHM in arcsec')
                image.header['L1ELLIP'] = ('NaN',
                                           'Mean image ellipticity (1-B/A)')
                image.header['L1ELLIPA'] = (
                    'NaN', '[deg] PA of mean image ellipticity')
            else:
                seeing = np.median(
                    catalog['fwhm'][good_objects]) * image.pixel_scale
                image.header['L1FWHM'] = (seeing,
                                          '[arcsec] Frame FWHM in arcsec')

                mean_ellipticity = stats.sigma_clipped_mean(
                    catalog['ellipticity'][good_objects], 3.0)
                image.header['L1ELLIP'] = (mean_ellipticity,
                                           'Mean image ellipticity (1-B/A)')

                mean_position_angle = stats.sigma_clipped_mean(
                    catalog['theta'][good_objects], 3.0)
                image.header['L1ELLIPA'] = (
                    mean_position_angle, '[deg] PA of mean image ellipticity')

            logging_tags = {
                key: float(image.header[key])
                for key in [
                    'L1MEAN', 'L1MEDIAN', 'L1SIGMA', 'L1FWHM', 'L1ELLIP',
                    'L1ELLIPA'
                ]
            }

            logger.info('Extracted sources',
                        image=image,
                        extra_tags=logging_tags)
            # adding catalog (a data table) to the appropriate images attribute.
            image.data_tables['catalog'] = DataTable(data_table=catalog,
                                                     name='CAT')
        except Exception:
            logger.error(logs.format_exception(), image=image)
        return image
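array_utils.prune_nans_from_table is not shown; its name and placement suggest it drops catalog rows containing NaNs before any further cuts. A hypothetical equivalent for an astropy Table:

import numpy as np

def prune_nans_from_table(table):
    # Keep only rows whose floating-point columns are all non-NaN
    good = np.ones(len(table), dtype=bool)
    for name in table.colnames:
        if table[name].dtype.kind == 'f':
            good &= ~np.isnan(np.asarray(table[name]))
    return table[good]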
Example #20
    def do_stage(self, images):
        for i, image in enumerate(images):
            try:
                # Set the number of source pixels to be 5% of the total. This keeps us safe from
                # satellites and airplanes.
                sep.set_extract_pixstack(int(image.nx * image.ny * 0.05))

                data = image.data.copy()
                error = (np.abs(data) + image.readnoise**2.0)**0.5
                mask = image.bpm > 0

                # FITS data can be in backwards byte order, so fix that if need be and subtract
                # the background
                try:
                    bkg = sep.Background(data,
                                         mask=mask,
                                         bw=32,
                                         bh=32,
                                         fw=3,
                                         fh=3)
                except ValueError:
                    data = data.byteswap(True).newbyteorder()
                    bkg = sep.Background(data,
                                         mask=mask,
                                         bw=32,
                                         bh=32,
                                         fw=3,
                                         fh=3)
                bkg.subfrom(data)

                # Do an initial source detection
                # TODO: Add back in masking after we are sure SEP works
                sources = sep.extract(data,
                                      self.threshold,
                                      minarea=self.min_area,
                                      err=error,
                                      deblend_cont=0.005)

                # Convert the detections into a table
                sources = Table(sources)

                # Calculate the ellipticity
                sources['ellipticity'] = 1.0 - (sources['b'] / sources['a'])

                # Fix any values of theta that are invalid due to floating point rounding
                # -pi / 2 < theta < pi / 2
                sources['theta'][sources['theta'] > (np.pi / 2.0)] -= np.pi
                sources['theta'][sources['theta'] < (-np.pi / 2.0)] += np.pi

                # Calculate the kron radius
                kronrad, krflag = sep.kron_radius(data, sources['x'],
                                                  sources['y'], sources['a'],
                                                  sources['b'],
                                                  sources['theta'], 6.0)
                sources['flag'] |= krflag
                sources['kronrad'] = kronrad

                # Calculate the equivalent of flux_auto
                flux, fluxerr, flag = sep.sum_ellipse(data,
                                                      sources['x'],
                                                      sources['y'],
                                                      sources['a'],
                                                      sources['b'],
                                                      np.pi / 2.0,
                                                      2.5 * kronrad,
                                                      subpix=1,
                                                      err=error)
                sources['flux'] = flux
                sources['fluxerr'] = fluxerr
                sources['flag'] |= flag

                # Calculate the FWHMs of the stars:
                fwhm = 2.0 * (np.log(2) *
                              (sources['a']**2.0 + sources['b']**2.0))**0.5
                sources['fwhm'] = fwhm

                # Cut individual bright pixels. Often cosmic rays
                sources = sources[fwhm > 1.0]

                # Measure the flux profile
                flux_radii, flag = sep.flux_radius(data,
                                                   sources['x'],
                                                   sources['y'],
                                                   6.0 * sources['a'],
                                                   [0.25, 0.5, 0.75],
                                                   normflux=sources['flux'],
                                                   subpix=5)
                sources['flag'] |= flag
                sources['fluxrad25'] = flux_radii[:, 0]
                sources['fluxrad50'] = flux_radii[:, 1]
                sources['fluxrad75'] = flux_radii[:, 2]

                # Calculate the windowed positions
                sig = 2.0 / 2.35 * sources['fluxrad50']
                xwin, ywin, flag = sep.winpos(data, sources['x'], sources['y'],
                                              sig)
                sources['flag'] |= flag
                sources['xwin'] = xwin
                sources['ywin'] = ywin

                # Calculate the average background at each source
                bkgflux, fluxerr, flag = sep.sum_ellipse(bkg.back(),
                                                         sources['x'],
                                                         sources['y'],
                                                         sources['a'],
                                                         sources['b'],
                                                         np.pi / 2.0,
                                                         2.5 *
                                                         sources['kronrad'],
                                                         subpix=1)
                #masksum, fluxerr, flag = sep.sum_ellipse(mask, sources['x'], sources['y'],
                #                                         sources['a'], sources['b'], np.pi / 2.0,
                #                                         2.5 * kronrad, subpix=1)

                background_area = (
                    2.5 * sources['kronrad']
                )**2.0 * sources['a'] * sources['b'] * np.pi  # - masksum
                sources['background'] = bkgflux
                sources['background'][background_area > 0] /= background_area[
                    background_area > 0]
                # Update the catalog to match fits convention instead of python array convention
                sources['x'] += 1.0
                sources['y'] += 1.0

                sources['xpeak'] += 1
                sources['ypeak'] += 1

                sources['xwin'] += 1.0
                sources['ywin'] += 1.0

                sources['theta'] = np.degrees(sources['theta'])

                image.catalog = sources['x', 'y', 'xwin', 'ywin', 'xpeak',
                                        'ypeak', 'flux', 'fluxerr',
                                        'background', 'fwhm', 'a', 'b',
                                        'theta', 'kronrad', 'ellipticity',
                                        'fluxrad25', 'fluxrad50', 'fluxrad75',
                                        'x2', 'y2', 'xy', 'flag']

                # Add the units and description to the catalogs
                image.catalog['x'].unit = 'pixel'
                image.catalog['x'].description = 'X coordinate of the object'
                image.catalog['y'].unit = 'pixel'
                image.catalog['y'].description = 'Y coordinate of the object'
                image.catalog['xwin'].unit = 'pixel'
                image.catalog[
                    'xwin'].description = 'Windowed X coordinate of the object'
                image.catalog['ywin'].unit = 'pixel'
                image.catalog[
                    'ywin'].description = 'Windowed Y coordinate of the object'
                image.catalog['xpeak'].unit = 'pixel'
                image.catalog['xpeak'].description = 'X coordinate of the peak'
                image.catalog['ypeak'].unit = 'pixel'
                image.catalog[
                    'ypeak'].description = 'Y coordinate of the peak'
                image.catalog['flux'].unit = 'counts'
                image.catalog[
                    'flux'].description = 'Flux within a Kron-like elliptical aperture'
                image.catalog['fluxerr'].unit = 'counts'
                image.catalog[
                    'fluxerr'].description = 'Error on the flux within a Kron-like elliptical aperture'
                image.catalog['background'].unit = 'counts'
                image.catalog[
                    'background'].description = 'Average background value in the aperture'
                image.catalog['fwhm'].unit = 'pixel'
                image.catalog['fwhm'].description = 'FWHM of the object'
                image.catalog['a'].unit = 'pixel'
                image.catalog[
                    'a'].description = 'Semi-major axis of the object'
                image.catalog['b'].unit = 'pixel'
                image.catalog[
                    'b'].description = 'Semi-minor axis of the object'
                image.catalog['theta'].unit = 'degrees'
                image.catalog[
                    'theta'].description = 'Position angle of the object'
                image.catalog['kronrad'].unit = 'pixel'
                image.catalog[
                    'kronrad'].description = 'Kron radius used for extraction'
                image.catalog['ellipticity'].description = 'Ellipticity'
                image.catalog['fluxrad25'].unit = 'pixel'
                image.catalog[
                    'fluxrad25'].description = 'Radius containing 25% of the flux'
                image.catalog['fluxrad50'].unit = 'pixel'
                image.catalog[
                    'fluxrad50'].description = 'Radius containing 50% of the flux'
                image.catalog['fluxrad75'].unit = 'pixel'
                image.catalog[
                    'fluxrad75'].description = 'Radius containing 75% of the flux'
                image.catalog['x2'].unit = 'pixel^2'
                image.catalog[
                    'x2'].description = 'Variance on X coordinate of the object'
                image.catalog['y2'].unit = 'pixel^2'
                image.catalog[
                    'y2'].description = 'Variance on Y coordinate of the object'
                image.catalog['xy'].unit = 'pixel^2'
                image.catalog['xy'].description = 'XY covariance of the object'
                image.catalog[
                    'flag'].description = 'Bit mask combination of extraction and photometry flags'

                image.catalog.sort('flux')
                image.catalog.reverse()

                logging_tags = logs.image_config_to_tags(
                    image, self.group_by_keywords)
                logs.add_tag(logging_tags, 'filename',
                             os.path.basename(image.filename))

                # Save some background statistics in the header
                mean_background = stats.sigma_clipped_mean(bkg.back(), 5.0)
                image.header['L1MEAN'] = (
                    mean_background,
                    '[counts] Sigma clipped mean of frame background')
                logs.add_tag(logging_tags, 'L1MEAN', float(mean_background))

                median_background = np.median(bkg.back())
                image.header['L1MEDIAN'] = (
                    median_background, '[counts] Median of frame background')
                logs.add_tag(logging_tags, 'L1MEDIAN',
                             float(median_background))

                std_background = stats.robust_standard_deviation(bkg.back())
                image.header['L1SIGMA'] = (
                    std_background,
                    '[counts] Robust std dev of frame background')
                logs.add_tag(logging_tags, 'L1SIGMA', float(std_background))

                # Save some image statistics to the header
                good_objects = image.catalog['flag'] == 0

                seeing = np.median(
                    image.catalog['fwhm'][good_objects]) * image.pixel_scale
                image.header['L1FWHM'] = (seeing,
                                          '[arcsec] Frame FWHM in arcsec')
                logs.add_tag(logging_tags, 'L1FWHM', float(seeing))

                mean_ellipticity = stats.sigma_clipped_mean(
                    sources['ellipticity'][good_objects], 3.0)
                image.header['L1ELLIP'] = (mean_ellipticity,
                                           'Mean image ellipticity (1-B/A)')
                logs.add_tag(logging_tags, 'L1ELLIP', float(mean_ellipticity))

                mean_position_angle = stats.sigma_clipped_mean(
                    sources['theta'][good_objects], 3.0)
                image.header['L1ELLIPA'] = (
                    mean_position_angle, '[deg] PA of mean image ellipticity')
                logs.add_tag(logging_tags, 'L1ELLIPA',
                             float(mean_position_angle))

                self.logger.info('Extracted sources', extra=logging_tags)

            except Exception as e:
                logging_tags = logs.image_config_to_tags(
                    image, self.group_by_keywords)
                logs.add_tag(logging_tags, 'filename',
                             os.path.basename(image.filename))
                self.logger.error(e, extra=logging_tags)
        return images