Example #1
def test_flat_correct_min_value(ccd_data):
    size = ccd_data.shape[0]

    # create the flat
    data = 2 * np.random.normal(loc=1.0, scale=0.05, size=(size, size))
    flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit)
    flat_orig_data = flat.data.copy()
    min_value = 2.1  # should replace some, but not all, values
    flat_corrected_data = flat_correct(ccd_data, flat, min_value=min_value)
    flat_with_min = flat.copy()
    flat_with_min.data[flat_with_min.data < min_value] = min_value

    # Check that the flat was normalized. The asserts below, which look a
    # little odd, are correctly testing that
    #    flat_corrected_data = ccd_data / (flat_with_min / mean(flat_with_min))
    np.testing.assert_almost_equal(
        (flat_corrected_data.data * flat_with_min.data).mean(),
        (ccd_data.data * flat_with_min.data.mean()).mean()
    )
    np.testing.assert_allclose(ccd_data.data / flat_corrected_data.data,
                               flat_with_min.data / flat_with_min.data.mean())

    # Test that flat is not modified.
    assert (flat_orig_data == flat.data).all()
    assert flat_orig_data is not flat.data
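
A standalone sketch of the same call, assuming ccdproc's public flat_correct and illustrative array sizes:

import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
from ccdproc import flat_correct

science = CCDData(np.ones((10, 10)), unit=u.adu)
flat_data = 2 * np.random.default_rng(1).normal(loc=1.0, scale=0.05, size=(10, 10))
flat = CCDData(flat_data, unit=u.adu)
# Flat pixels below min_value are clipped before normalization, which
# guards against dividing by very small flat values.
corrected = flat_correct(science, flat, min_value=2.1)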
Example #2
    def sum_combine(self, sum_func=ma.sum, scale_to=None,
                    uncertainty_func=ma.std):
        """
        Sum combine together a set of arrays.

        A `~astropy.nddata.CCDData` object is returned with the data property
        set to the sum of the arrays. If the data were masked or any
        data have been rejected, those pixels will not be included in the
        sum. A mask will be returned, and a pixel will be masked only if it
        was rejected in all images. The uncertainty of the combined image is
        the standard deviation of the inputs multiplied by the square root
        of the number of images. Because sum_combine returns the pure sum
        with masked pixels ignored, a re-scaled sum requires using
        average_combine and multiplying the result by the number of images
        combined.

        Parameters
        ----------
        sum_func : function, optional
            Function to calculate the sum. Defaults to
            `numpy.ma.sum`.

        scale_to : float or None, optional
            Scaling factor used in the sum combined image. If given,
            it overrides `scaling`. Defaults to ``None``.

        uncertainty_func : function, optional
            Function to calculate uncertainty. Defaults to `numpy.ma.std`.

        Returns
        -------
        combined_image : `~astropy.nddata.CCDData`
            CCDData object based on the combined input of CCDData objects.
        """
        # set up the data
        data = sum_func(self._get_scaled_data(scale_to), axis=0)

        # set up the mask
        masked_values = self.data_arr.mask.sum(axis=0)
        mask = (masked_values == len(self.data_arr))

        # set up the deviation
        uncertainty = uncertainty_func(self.data_arr, axis=0)
        # Divide by the square root of the number of unmasked images (#309)
        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
        # Convert uncertainty to plain numpy array (#351)
        uncertainty = np.asarray(uncertainty)
        # Multiply by the number of unmasked images; combined with the
        # division above, the net factor is sqrt(n_images)
        uncertainty *= len(self.data_arr) - masked_values

        # create the combined image with a dtype that matches the combiner
        combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
                                 mask=mask, unit=self.unit,
                                 uncertainty=StdDevUncertainty(uncertainty))

        # update the meta data
        combined_image.meta['NCOMBINE'] = len(self.data_arr)

        # return the combined image
        return combined_image
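
A short usage sketch, assuming this method is reached through ccdproc's public Combiner class (the array values are illustrative):

import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
from ccdproc import Combiner

images = [CCDData(np.full((5, 5), v), unit=u.adu) for v in (1.0, 2.0, 3.0)]
combiner = Combiner(images)
summed = combiner.sum_combine()
print(summed.data[0, 0])        # 6.0
print(summed.meta['NCOMBINE'])  # 3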
Example #3
def test_block_average():
    ccd = CCDData(np.ones((4, 4)), unit='adu', meta={'testkw': 1},
                  mask=np.zeros((4, 4), dtype=bool),
                  uncertainty=StdDevUncertainty(np.ones((4, 4))),
                  wcs=np.zeros((4, 4)))
    ccd.data[::2, ::2] = 2
    with catch_warnings(AstropyUserWarning) as w:
        ccd_avgd = block_average(ccd, (2, 2))
    assert len(w) == 1
    assert 'following attributes were set' in str(w[0].message)

    assert isinstance(ccd_avgd, CCDData)
    assert np.all(ccd_avgd.data == 1.25)
    assert ccd_avgd.data.shape == (2, 2)
    assert ccd_avgd.unit == u.adu
    # Other attributes are set to None. In case the function is modified to
    # work on these attributes correctly those tests need to be updated!
    assert ccd_avgd.meta == {'testkw': 1}
    assert ccd_avgd.mask is None
    assert ccd_avgd.wcs is None
    assert ccd_avgd.uncertainty is None

    # Make sure meta is copied
    ccd_avgd.meta['testkw2'] = 10
    assert 'testkw2' not in ccd.meta
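
A minimal sketch of calling ccdproc's block_average directly (values are illustrative); note that it warns about the attributes it drops, as the test above checks:

import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
from ccdproc import block_average

ccd = CCDData(np.arange(16, dtype=float).reshape(4, 4), unit=u.adu)
# Each 2x2 block is replaced by its mean, halving both dimensions.
small = block_average(ccd, (2, 2))
print(small.data.shape)  # (2, 2)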
Example #4
def ccd_data(request):
    """
    Return a CCDData object with units of ADU.

    The size of the data array is 100x100 but can be changed using the marker
    @pytest.mark.data_size(N) on the test function, where N should be the
    desired dimension.

    Data values are initialized to random numbers drawn from a normal
    distribution with mean of 0 and scale 1.

    The scale can be changed with the marker @pytest.mark.data_scale(s) on
    the test function, where s is the desired scale.

    The mean can be changed with the marker @pytest.mark.data_mean(m) on the
    test function, where m is the desired mean.
    """
    size = value_from_markers('data_size', request)
    scale = value_from_markers('data_scale', request)
    mean = value_from_markers('data_mean', request)

    with NumpyRNGContext(DEFAULTS['seed']):
        data = np.random.normal(loc=mean, size=[size, size], scale=scale)

    fake_meta = {'my_key': 42, 'your_key': 'not 42'}
    ccd = CCDData(data, unit=u.adu)
    ccd.header = fake_meta
    return ccd
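
A sketch of how a test could override the fixture defaults, assuming the data_size, data_scale and data_mean markers (the keys passed to value_from_markers above) are registered with pytest:

import pytest

@pytest.mark.data_size(50)
@pytest.mark.data_scale(2.0)
@pytest.mark.data_mean(5.0)
def test_custom_ccd(ccd_data):
    assert ccd_data.shape == (50, 50)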
Example #5
def test_ccd_process_gain_corrected():
    # test a run through ccd_process with gain_corrected set to False
    ccd_data = CCDData(10.0 * np.ones((100, 100)), unit=u.adu)
    ccd_data.data[:, -10:] = 2
    ccd_data.meta['testkw'] = 100

    mask = np.zeros((100, 90))

    masterbias = CCDData(4.0 * np.ones((100, 90)), unit=u.adu)
    masterbias.uncertainty = StdDevUncertainty(np.zeros((100, 90)))

    dark_frame = CCDData(0.0 * np.ones((100, 90)), unit=u.adu)
    dark_frame.uncertainty = StdDevUncertainty(np.zeros((100, 90)))

    masterflat = CCDData(5.0 * np.ones((100, 90)), unit=u.adu)
    masterflat.uncertainty = StdDevUncertainty(np.zeros((100, 90)))

    occd = ccd_process(ccd_data, oscan=ccd_data[:, -10:], trim='[1:90,1:100]',
                       error=True, master_bias=masterbias,
                       master_flat=masterflat, dark_frame=dark_frame,
                       bad_pixel_mask=mask, gain=0.5 * u.electron/u.adu,
                       readnoise=5**0.5 * u.electron, oscan_median=True,
                       dark_scale=False, dark_exposure=1.*u.s,
                       data_exposure=1.*u.s, gain_corrected=False)

    # final results should be (10 - 2) / 2.0 - 2 = 2
    # error should be (4 + 5)**0.5 / 0.5  = 3.0

    np.testing.assert_array_equal(2.0 * np.ones((100, 90)), occd.data)
    np.testing.assert_almost_equal(3.0 * np.ones((100, 90)),
                                   occd.uncertainty.array)
    np.testing.assert_array_equal(mask, occd.mask)
    assert occd.unit == u.electron
    # Make sure the original keyword is still present. Regression test for #401
    assert occd.meta['testkw'] == 100
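
A minimal standalone sketch of ccd_process, assuming only gain correction and error-plane creation are wanted (values are illustrative):

import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
from ccdproc import ccd_process

raw = CCDData(np.ones((10, 10)), unit=u.adu)
# error=True requires both gain and readnoise so an uncertainty can be built.
reduced = ccd_process(raw, gain=1.5 * u.electron / u.adu,
                      readnoise=5 * u.electron, error=True)
print(reduced.unit)  # electron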
Example #6
def clean_image(image_file):
    """
    Cleans a single image.  Cleaning includes:
        -Gain application
        -Cosmic ray removal
    
    Returns: astropy.nddata.ccddata.CCDData object
    
    Header needs: 'GAINI' (optional, but good); 'EXPTIME', 'BINFAC'
    """

    # Read in image and header
    img, hdr = fits.getdata(image_file, header=True)
    if 'GAINI' in hdr:
        gain = hdr['GAINI'] * u.electron / u.adu
    else:
        print("Gain not found in header: assuming 1 e/adu")
        gain = 1 * u.electron / u.adu

    readnoise = 5 * u.electron  #From STA1600LN data sheet

    # Define data and uncertainty
    data = CCDData(img, unit=u.adu)
    data_with_deviation = ccdproc.create_deviation(data,
                                                   gain=gain,
                                                   readnoise=readnoise)
    data_with_deviation.header['exposure'] = float(hdr['EXPTIME'])

    # Apply gain to data
    gain_corrected = ccdproc.gain_correct(data_with_deviation, gain)

    # Clean cosmic rays
    cr_cleaned = ccdproc.cosmicray_lacosmic(gain_corrected, sigclip=5)

    return cr_cleaned
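
A hypothetical usage sketch; the file names are placeholders for any FITS image carrying the header keywords listed in the docstring:

cleaned = clean_image('night1/science_0001.fits')
cleaned.write('night1/science_0001_clean.fits', overwrite=True)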
Example #7
def test_adding_markers_as_world_recovers_with_get_markers():
    """
    Make sure that our internal conversion from world to pixel
    coordinates doesn't mess anything up.
    """
    npix_side = 100
    fake_image = np.random.randn(npix_side, npix_side)
    wcs = WCS(naxis=2)
    wcs.wcs.crpix = (fake_image.shape[0] / 2, fake_image.shape[1] / 2)
    wcs.wcs.ctype = ('RA---TAN', 'DEC--TAN')
    wcs.wcs.crval = (314.275419158, 31.6662781301)
    wcs.wcs.pc = [[0.000153051015113, -3.20700931602e-05],
                  [3.20704370872e-05, 0.000153072382405]]
    fake_ccd = CCDData(data=fake_image, wcs=wcs, unit='adu')
    iw = ImageWidget(pixel_coords_offset=0)
    iw.load_nddata(fake_ccd)
    # Get me 100 positions please, not right at the edge
    marker_locs = np.random.randint(10,
                                    high=npix_side - 10,
                                    size=(100, 2))
    marks_pix = Table(data=marker_locs, names=['x', 'y'])
    marks_world = wcs.all_pix2world(marker_locs, 0)
    marks_coords = SkyCoord(marks_world, unit='degree')
    mark_coord_table = Table(data=[marks_coords], names=['coord'])
    iw.add_markers(mark_coord_table, use_skycoord=True)
    result = iw.get_markers()
    # Check the x, y positions as long as we are testing things...
    np.testing.assert_allclose(result['x'], marks_pix['x'])
    np.testing.assert_allclose(result['y'], marks_pix['y'])
    np.testing.assert_allclose(result['coord'].ra.deg,
                               mark_coord_table['coord'].ra.deg)
    np.testing.assert_allclose(result['coord'].dec.deg,
                               mark_coord_table['coord'].dec.deg)
Example #8
    def to_object(self, data_or_subset, attribute=None):
        """
        Convert a glue Data object to a CCDData object.

        Parameters
        ----------
        data_or_subset : `glue.core.data.Data` or `glue.core.subset.Subset`
            The data to convert to a CCDData object
        attribute : `glue.core.component_id.ComponentID`
            The attribute to use for the CCDData data
        """

        if isinstance(data_or_subset, Subset):
            data = data_or_subset.data
            subset_state = data_or_subset.subset_state
        else:
            data = data_or_subset
            subset_state = None

        if isinstance(data.coords, WCS):
            wcs = data.coords
        elif type(data.coords) is Coordinates or data.coords is None:
            wcs = None
        else:
            raise TypeError(
                'data.coords should be an instance of Coordinates or WCS')

        if isinstance(attribute, str):
            attribute = data.id[attribute]
        elif len(data.main_components) == 0:
            raise ValueError('Data object has no attributes.')
        elif attribute is None:
            if len(data.main_components) == 1:
                attribute = data.main_components[0]
            else:
                raise ValueError(
                    "Data object has more than one attribute, so "
                    "you will need to specify which one to use as "
                    "the data for the CCDData object using the "
                    "attribute= keyword argument.")

        component = data.get_component(attribute)

        if data.ndim != 2:
            raise ValueError(
                "Only 2-dimensional datasets can be converted to CCDData")

        values = data.get_data(attribute)

        if subset_state is None:
            mask = None
        else:
            mask = data.get_mask(subset_state=subset_state)
            values = values.copy()
            # Flip mask to match the astropy.nddata formalism
            mask = ~mask

        values = values * u.Unit(component.units)

        return CCDData(values, mask=mask, wcs=wcs, meta=data.meta)
Example #9
def make_fake_data(nimg,
                   outdir,
                   nsources=100,
                   shape=(2048, 2048),
                   dtype=np.float32):
    # Set a seed so that the tests are repeatable
    np.random.seed(200)

    # Add some fake sources
    sources = np.zeros(shape, dtype=np.float32)
    xx = np.random.uniform(low=0.0, high=shape[0], size=nsources)
    yy = np.random.uniform(low=0.0, high=shape[1], size=nsources)
    brightness = np.random.uniform(low=1000., high=30000., size=nsources)
    for x, y, b in zip(xx, yy, brightness):
        sources += gaussian(shape, x, y, b, 5)

    for i in range(nimg):
        # Create a simulated image to use in our tests
        imdata = np.zeros(shape, dtype=dtype)
        # Add sky and sky noise
        imdata += 200

        imdata += sources

        # Add the poisson noise
        imdata = np.float32(np.random.poisson(imdata))

        # Add readnoise
        imdata += np.random.normal(0.0, 10.0, size=shape)

        # Add 100 fake cosmic rays
        cr_x = np.random.randint(low=5, high=shape[0] - 5, size=100)
        cr_y = np.random.randint(low=5, high=shape[1] - 5, size=100)
        cr_brightnesses = np.random.uniform(low=1000.0, high=30000.0, size=100)
        imdata[cr_y, cr_x] += cr_brightnesses
        imdata = imdata.astype('f4')

        # Make a mask where the detected cosmic rays should be
        # crmask = np.zeros(shape, dtype=bool)
        # crmask[cr_y, cr_x] = True

        ccd = CCDData(imdata,
                      uncertainty=VarianceUncertainty(imdata / 10),
                      unit="electron")
        ccd.write(os.path.join(outdir, f'image-{i+1:02d}.fits'),
                  overwrite=True)
        print('.', end='')
Example #10
def test_subtract_dark(ccd_data, explicit_times, scale, exposure_keyword):
    exptime = 30.0
    exptime_key = 'exposure'
    exposure_unit = u.second
    dark_level = 1.7
    master_dark_data = np.zeros_like(ccd_data.data) + dark_level
    master_dark = CCDData(master_dark_data, unit=u.adu)
    master_dark.header[exptime_key] = 2 * exptime
    dark_exptime = master_dark.header[exptime_key]
    ccd_data.header[exptime_key] = exptime
    dark_exposure_unit = exposure_unit
    if explicit_times:
        # test case when units of dark and data exposures are different
        dark_exposure_unit = u.minute
        dark_sub = subtract_dark(ccd_data,
                                 master_dark,
                                 dark_exposure=dark_exptime *
                                 dark_exposure_unit,
                                 data_exposure=exptime * exposure_unit,
                                 scale=scale,
                                 add_keyword=None)
    elif exposure_keyword:
        key = Keyword(exptime_key, unit=u.second)
        dark_sub = subtract_dark(ccd_data,
                                 master_dark,
                                 exposure_time=key,
                                 scale=scale,
                                 add_keyword=None)
    else:
        dark_sub = subtract_dark(ccd_data,
                                 master_dark,
                                 exposure_time=exptime_key,
                                 exposure_unit=u.second,
                                 scale=scale,
                                 add_keyword=None)

    dark_scale = 1.0
    if scale:
        dark_scale = float(
            (exptime / dark_exptime) * (exposure_unit / dark_exposure_unit))

    np.testing.assert_array_equal(ccd_data.data - dark_scale * dark_level,
                                  dark_sub.data)
    # Headers should have the same content...do they?
    assert dark_sub.header == ccd_data.header
    # But the headers should not be the same object -- a copy was made
    assert dark_sub.header is not ccd_data.header
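
A standalone sketch of subtract_dark with explicit exposure times (values are illustrative):

import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
from ccdproc import subtract_dark

science = CCDData(np.full((10, 10), 100.0), unit=u.adu)
dark = CCDData(np.full((10, 10), 10.0), unit=u.adu)
# The 60 s dark is scaled down to the 30 s science exposure, so 5 adu
# is subtracted from every pixel.
result = subtract_dark(science, dark,
                       dark_exposure=60 * u.s, data_exposure=30 * u.s,
                       scale=True)
print(result.data[0, 0])  # 95.0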
Example #11
def make_2dspec_image(
    nx=3000,
    ny=1000,
    background=5,
    trace_center=None,
    trace_order=3,
    trace_coeffs={'c0': 0, 'c1': 50, 'c2': 100},
    source_amplitude=10,
    source_alpha=0.1
):
    """
    Create synthetic 2D spectroscopic image with a single source. The spatial (y-axis) position
    of the source along the dispersion (x-axis) direction is modeled using a Chebyshev polynomial.
    The flux units are counts and the noise is modeled as Poisson.

    Parameters
    ----------
    nx : int (default=3000)
        Size of image in X axis which is assumed to be the dispersion axis
    ny : int (default=1000)
        Size of image in Y axis which is assumed to be the spatial axis
    background : int (default=5)
        Level of constant background in counts
    trace_center : int (default=None)
        Zeropoint of the trace. If None, then use center of Y (spatial) axis.
    trace_order : int (default=3)
        Order of the Chebyshev polynomial used to model the source's trace
    trace_coeffs : dict (default={'c0': 0, 'c1': 50, 'c2': 100})
        Dict containing the Chebyshev polynomial coefficients to use in the trace model
    source_amplitude : int (default=10)
        Amplitude of modeled source in counts
    source_alpha : float (default=0.1)
        Power index of the source's Moffat profile. Use small number here to emulate
        extended source.

    Returns
    -------
    ccd_im : `~astropy.nddata.CCDData`
        CCDData instance containing synthetic 2D spectroscopic image
    """
    x = np.arange(nx)
    y = np.arange(ny)
    xx, yy = np.meshgrid(x, y)

    profile = models.Moffat1D()
    profile.amplitude = source_amplitude
    profile.alpha = source_alpha

    if trace_center is None:
        trace_center = ny / 2

    trace_mod = models.Chebyshev1D(degree=trace_order, **trace_coeffs)
    trace = yy - trace_center + trace_mod(xx/nx)
    z = background + profile(trace)
    noisy_image = apply_poisson_noise(z)

    ccd_im = CCDData(noisy_image, unit=u.count)

    return ccd_im
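
A usage sketch with illustrative parameters (a smaller frame and a steeper linear trace):

frame = make_2dspec_image(nx=500, ny=200,
                          trace_coeffs={'c0': 0, 'c1': 20})
print(frame.shape)  # (200, 500)
print(frame.unit)   # ct (counts)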
Example #12
def test_combiner_uncertainty_median_mask():
    mad_to_sigma = 1.482602218505602
    mask = np.zeros((10, 10), dtype=np.bool_)
    mask[5, 5] = True
    ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask)
    ccd_list = [ccd_with_mask,
                CCDData(np.ones((10, 10))*2, unit=u.adu),
                CCDData(np.ones((10, 10))*3, unit=u.adu)]
    c = Combiner(ccd_list)
    ccd = c.median_combine()
    # Just the standard deviation of ccd data.
    ref_uncertainty = np.ones((10, 10)) * mad_to_sigma * mad([1, 2, 3])
    # Correction because three images were combined.
    ref_uncertainty /= np.sqrt(3)  # 0.855980789955
    ref_uncertainty[5, 5] = mad_to_sigma * mad([2, 3]) / np.sqrt(2) # 0.524179041254
    np.testing.assert_array_almost_equal(ccd.uncertainty.array,
                                         ref_uncertainty)
Example #13
def test_combiner_result_dtype():
    """Regression test: #391

    The result should have the appropriate dtype, not the dtype of the first
    input."""
    ccd = CCDData(np.ones((3, 3), dtype=np.uint16), unit='adu')
    res = combine([ccd, ccd.multiply(2)])
    # The default dtype of Combiner is float64
    assert res.data.dtype == np.float64
    ref = np.ones((3, 3)) * 1.5
    np.testing.assert_array_almost_equal(res.data, ref)

    res = combine([ccd, ccd.multiply(2), ccd.multiply(3)], dtype=int)
    # The result dtype should be integer:
    assert res.data.dtype == np.int_
    ref = np.ones((3, 3)) * 2
    np.testing.assert_array_almost_equal(res.data, ref)
Example #14
    def onBiasSubstraction(self):
        self.fitImageTableWidget.currentFileInfo.currentFolderLocation = self.rawFolderLocation + '/dark'

        darkPath = Path(
            self.fitImageTableWidget.currentFileInfo.currentFolderLocation)
        Path.mkdir(darkPath, mode=0o777, exist_ok=True)

        darkFitFileList = self.fitImageTableWidget.currentFileInfo.fitFileList[
            self.fitImageTableWidget.currentFileInfo.fitFileList['IMAGETYPE']
            == 'Dark Frame']
        biasFitFileList = self.fitImageTableWidget.currentFileInfo.fitFileList[
            self.fitImageTableWidget.currentFileInfo.fitFileList['IMAGETYPE']
            == 'Bias Frame']
        # load the bias data
        biasFileName = self.rawFolderLocation + '/combine/' + biasFitFileList[
            'FILE-NAME'].iloc[0]
        biasHdr, biasData = openFitData(biasFileName)

        # load the dark data and subtract the bias
        nlist = len(darkFitFileList)
        i = 0
        for darkFile in darkFitFileList['FILE-NAME']:
            savePath = darkPath / darkFile
            darkFileName = self.rawFolderLocation + '/combine/' + darkFile
            darkHdr, darkData = openFitData(darkFileName)
            data = darkData - biasData
            hdr = darkHdr
            hdr['HISTORY'] = 'BiasSub'
            subbedCCD = CCDData(data=data, header=hdr, unit="adu")
            subbedCCD.write(savePath, overwrite=True)
            i += 1
            self.step = i / nlist * 100
            self.onProgressChange()
        files = list(darkPath.glob("*.fit"))
        fileInfo = fileOpener(files)
        print(fileInfo)
        fileInfo = np.hstack((fileInfo, np.zeros((fileInfo.shape[0], 1), str)))
        darkFileList = pd.DataFrame(np.array(fileInfo),
                                    columns=[
                                        'FILE-NAME', 'DATE-OBS', 'EXPTIME',
                                        'IMAGETYPE', 'OBJECT', 'REMARKS'
                                    ])
        self.fitImageTableWidget.currentFileInfo.fitFileList = darkFileList
        self.darkFileList = darkFileList
        self.fitImageTableWidget.tableEdit()
        self.biasSubstractionBtn.setEnabled(False)
        self.darkSubstractionBtn.setEnabled(True)
Example #15
 def sky_subtraction(self, order=3, filepath=None):
     '''
     Do polynomial-fitting sky subtraction

     Parameters
     ----------
     order (optional) : int
         order of the polynomial
     filepath (optional) : str
         if given, also write the sky-subtracted image to this FITS file
     '''
     data = np.array(self.data.copy())
     maskplus = self.data.mask.copy()
     backR = polynomialfit(data, maskplus.astype(bool), order=order)
     background = backR['bkg']
     self.ss_data = CCDData(data - background, unit=self.data.unit)
     self.ss_data.mask = maskplus
     if filepath is not None:
         hdu_temp = fits.PrimaryHDU(data - background)
         hdu_temp.writeto(filepath, overwrite=True)
Example #16
def test_flat_correct_does_not_change_input():
    ccd_data = ccd_data_func()
    original = ccd_data.copy()
    flat = CCDData(np.zeros_like(ccd_data), unit=ccd_data.unit)
    with np.errstate(invalid="ignore"):
        _ = flat_correct(ccd_data, flat=flat)
    np.testing.assert_array_equal(original.data, ccd_data.data)
    assert original.unit == ccd_data.unit
Example #17
def test_subtract_dark_fails(ccd_data):
    # None of these tests check a result so the content of the master
    # can be anything.
    ccd_data.header['exptime'] = 30.0
    master = ccd_data.copy()

    # Do we fail if we give one of dark_exposure, data_exposure but not both?
    with pytest.raises(TypeError):
        subtract_dark(ccd_data, master, dark_exposure=30 * u.second)
    with pytest.raises(TypeError):
        subtract_dark(ccd_data, master, data_exposure=30 * u.second)

    # Do we fail if we supply dark_exposure and data_exposure and exposure_time
    with pytest.raises(TypeError):
        subtract_dark(ccd_data,
                      master,
                      dark_exposure=10 * u.second,
                      data_exposure=10 * u.second,
                      exposure_time='exptime')

    # Fail if we supply none of the exposure-related arguments?
    with pytest.raises(TypeError):
        subtract_dark(ccd_data, master)

    # Fail if we supply exposure time but not a unit?
    with pytest.raises(TypeError):
        subtract_dark(ccd_data, master, exposure_time='exptime')

    # Fail if ccd_data or master are not CCDData objects?
    with pytest.raises(TypeError):
        subtract_dark(ccd_data.data, master, exposure_time='exptime')
    with pytest.raises(TypeError):
        subtract_dark(ccd_data, master.data, exposure_time='exptime')

    # Fail if units do not match...

    # ...when there is no scaling?
    master = CCDData(ccd_data)
    master.unit = u.meter

    with pytest.raises(u.UnitsError) as e:
        subtract_dark(ccd_data,
                      master,
                      exposure_time='exptime',
                      exposure_unit=u.second)
    assert "uncalibrated image" in str(e.value)
Example #18
def test_combiner_uncertainty_sum_mask():
    mask = np.zeros((10, 10), dtype=np.bool_)
    mask[5, 5] = True
    ccd_with_mask = CCDData(np.ones((10, 10)), unit=u.adu, mask=mask)
    ccd_list = [
        ccd_with_mask,
        CCDData(np.ones((10, 10)) * 2, unit=u.adu),
        CCDData(np.ones((10, 10)) * 3, unit=u.adu)
    ]
    c = Combiner(ccd_list)
    ccd = c.sum_combine()
    # Just the standard deviation of ccd data.
    ref_uncertainty = np.ones((10, 10)) * np.std([1, 2, 3])
    ref_uncertainty *= np.sqrt(3)
    ref_uncertainty[5, 5] = np.std([2, 3]) * np.sqrt(2)
    np.testing.assert_array_almost_equal(ccd.uncertainty.array,
                                         ref_uncertainty)
Example #20
def write_output(output: str, reffed_image: CCDData, scamp_data,
                 sextractor_data, reference_catalog_data):
    output, ext = os.path.splitext(output)
    if scamp_data:
        with open(output + '_scamp.head', 'w') as f:
            f.write(scamp_data)
    if sextractor_data:
        with open(output + '_sextractor.fits', 'wb') as f:
            f.write(sextractor_data)
    if reference_catalog_data:
        with open(output + '_reference.cat', 'wb') as f:
            f.write(reference_catalog_data)
    if output:
        try:
            reffed_image.write(output + ext, overwrite=True)
        except OSError as err:
            print(err, "writing output failed")
Example #21
    def rd(self,num, ext=0) :
        """ Read an image

        Args :
            num (str or int) : name or number of image to read
            ext (int) : FITS extension to read (default 0)
        Returns :
            image (CCDData ) : CCDData object
        """
        out=[]
        # loop over different channels (if any)
        idet=0 
        for form,gain,rn in zip(self.formstr,self.gain,self.rn) :
            # find the files that match the directory/format
            if type(num) is int :
                search=self.dir+'/'+self.root+form.format(num)+'.fits*'
            elif type(num) is str or type(num) is np.str_ :
                if num.find('/') >= 0 :
                    search=num+'*'
                else :
                    search=self.dir+'/*'+num+'*'
            else :
                print('stopping in rd... num:',num)
                pdb.set_trace()
            file=glob.glob(search)
            if len(file) == 0 : 
                print('cannot find file matching: '+search)
                return
            elif len(file) > 1 : 
                if self.verbose : print('more than one match found, using first!',file)
            file=file[0]

            # read the file into a CCDData object
            if self.verbose : print('  Reading file: {:s}'.format(file)) 
            try : im=CCDData.read(file,hdu=ext,unit='adu')
            except Exception : raise RuntimeError('Error reading file: {:s}'.format(file))
            im.header['FILE'] = os.path.basename(file)
            if 'OBJECT' not in im.header :
                try: im.header['OBJECT'] = im.header['OBJNAME']
                except KeyError : im.header['OBJECT'] = im.header['FILE']

            # Add uncertainty (will be in error if there is an overscan, but redo with overscan subtraction later)
            data=copy.copy(im.data)
            data[data<0] = 0.
            im.uncertainty = StdDevUncertainty(np.sqrt( data/gain + (rn/gain)**2 ))

            # Add mask
            if self.mask is not None : im.mask = self.mask
            else : im.mask = np.zeros(im.data.shape,dtype=bool)
            if self.badpix is not None :
                for badpix in self.badpix[idet] :
                    badpix.setval(im.mask,True)

            out.append(im)
            idet+=1

        # return the data
        if len(out) == 1 : return out[0]
        else : return out
Example #22
def trim_rotate_split(setup_object, setup_flat, setup_arc, dictionary, key):
    xmin3, xmax3 = 710, 860
    xmin7, xmax7 = 1205, 1350
    ############ split files
    for img in setup_object[key] + setup_flat[key] + setup_arc[key]:
        #        img3=re.sub('.fits','',img) + '_3.fits'
        #        img7=re.sub('.fits','',img) + '_7.fits'
        _header3 = dictionary[img]['fits'][3].header
        _header7 = dictionary[img]['fits'][7].header
        _data3 = np.transpose(dictionary[img]['fits'][3].data)
        _data7 = np.transpose(dictionary[img]['fits'][7].data)

        for ll in ['DATASEC', 'DETSIZE', 'DETSEC']:
            del _header3[ll]
            del _header7[ll]

        science3 = CCDData(data=_data3, header=_header3, unit=u.adu)
        science7 = CCDData(data=_data7, header=_header7, unit=u.adu)
        # copy header keywords from the dictionary
        _header3['exptime'] = dictionary[img]['EXPTIME']
        _header3['MJD-OBS'] = dictionary[img]['MJD-OBS']
        _header3['OBJECT'] = dictionary[img]['OBJECT']
        _header3['OBSTYPE'] = dictionary[img]['OBSTYPE']
        _header3['AIRMASS'] = dictionary[img]['AIRMASS']
        _header3['RA'] = dictionary[img]['RA']
        _header3['DEC'] = dictionary[img]['DEC']

        _header7['exptime'] = dictionary[img]['EXPTIME']
        _header7['MJD-OBS'] = dictionary[img]['MJD-OBS']
        _header7['OBJECT'] = dictionary[img]['OBJECT']
        _header7['OBSTYPE'] = dictionary[img]['OBSTYPE']
        _header7['AIRMASS'] = dictionary[img]['AIRMASS']
        _header7['RA'] = dictionary[img]['RA']
        _header7['DEC'] = dictionary[img]['DEC']

        #  trim images
        trimmed3 = ccdproc.trim_image(science3,
                                      fits_section='[:,' + str(xmin3) + ':' +
                                      str(xmax3) + ']')
        trimmed7 = ccdproc.trim_image(science7,
                                      fits_section='[:,' + str(xmin7) + ':' +
                                      str(xmax7) + ']')
        dictionary[img]['trimmed3'] = trimmed3
        dictionary[img]['trimmed7'] = trimmed7
    return dictionary
Example #23
def test_flat_correct_deviation(ccd_data):
    size = ccd_data.shape[0]
    ccd_data.unit = u.electron
    ccd_data = create_deviation(ccd_data, readnoise=5 * u.electron)
    # create the flat
    data = 2 * np.ones((size, size))
    flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit)
    flat = create_deviation(flat, readnoise=0.5 * u.electron)
    ccd_data = flat_correct(ccd_data, flat)
Example #24
 def ccd(self):
     data = 100 * np.ones(self.shape)
     uncert = StdDevUncertainty(np.sqrt(data), unit=self.unit)
     unit = self.unit
     return CCDData(data,
                    uncertainty=uncert,
                    mask=self.mask,
                    unit=unit,
                    meta=self.meta)
Example #25
def sersic_2d_image():
    """fixture for Sersic 2D image """
    path = "sersic_2d_image.fits.gz"
    sersic_2d_path = os.path.join(os.path.dirname(__file__), path)

    if not os.path.isfile(sersic_2d_path):
        make_image()

    return CCDData.read(sersic_2d_path)
Example #26
    def _parse_as_image(path):
        with fits.open(path) as hdulist:
            if 'BUNIT' not in hdulist[0].header:
                logging.warning(
                    "No 'BUNIT' defined in the header, using 'Jy'.")

            unit = hdulist[0].header.get('BUNIT', 'Jy')

        return CCDData.read(path, unit=unit)
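
For comparison, a minimal sketch of the fallback path, where the unit must be supplied explicitly because the header has no BUNIT (the file name is a placeholder):

from astropy.nddata import CCDData

ccd = CCDData.read('image_without_bunit.fits', unit='Jy')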
Example #27
    def test_invalid_type(self):
        with pytest.raises(TypeError):
            _extract_fits(None)

        with pytest.raises(TypeError):
            _extract_fits(np.array([1, 2, 3]))

        with pytest.raises(TypeError):
            _extract_fits(CCDData([1, 2, 3], unit='adu'))
Example #28
    def __init__(
        self,
        ybounds=(425, 510),
        root_dir=None,
        inpaint_bad_pixels=False,
        inpaint_cosmic_rays=False,
    ):
        super().__init__()

        if root_dir is None:
            root_dir = "/home/gully/GitHub/ynot/test/data/2012-11-27/"
        self.root_dir = root_dir
        self.nirspec_collection = self.create_nirspec_collection()
        # self.unique_objects = self.get_unique_objects()
        # self.label_nirspec_nods()
        nodA_path = self.root_dir + "/NS.20121127.49332.fits"
        nodA_data = fits.open(nodA_path)[0].data.astype(np.float64)
        nodA = torch.tensor(nodA_data)

        nodB_path = self.root_dir + "/NS.20121127.50726.fits"
        nodB_data = fits.open(nodB_path)[0].data.astype(np.float64)
        nodB = torch.tensor(nodB_data)

        # Read in the Bad Pixel mask
        self.bpm = self.load_bad_pixel_mask()

        data_full = torch.stack([nodA, nodB])  # Creates NxHxW tensor
        # Inpaint bad pixels.  In the future we will simply neglect these pixels
        if inpaint_bad_pixels:
            data_full = self.inpaint_bad_pixels(data_full)

        self.n_images = len(data_full[:, 0, 0])
        self.gain = 5.8  # e/ADU, per NIRSPEC documentation

        if inpaint_cosmic_rays:
            for ii in range(self.n_images):
                nod_ccd = CCDData(data_full[ii].numpy(), unit="adu")
                out = ccdproc.cosmicray_lacosmic(
                    nod_ccd,
                    readnoise=23.0,
                    gain=self.gain,
                    verbose=False,
                    satlevel=1.0e7,
                    sigclip=7.0,
                    sepmed=False,
                    cleantype="medmask",
                    fsmode="median",
                )
                data_full[ii] = torch.tensor(out.data)
        else:
            data_full = data_full * self.gain

        data = data_full[:, ybounds[0]:ybounds[1], :]
        data = data.permute(0, 2, 1)

        self.pixels = data
        self.index = torch.tensor([0, 1])
Example #29
def kcwi_fits_reader(file):
    """A reader for KeckData objects.
    Currently this is a separate function, but should probably be
    registered as a reader similar to fits_ccddata_reader.
    Arguments:
    file -- The filename (or pathlib.Path) of the FITS file to open.
    """
    try:
        hdul = fits.open(file)
    except (FileNotFoundError, OSError) as e:
        print(e)
        raise e
    read_imgs = 0
    read_tabs = 0
    # primary image
    ccddata = CCDData(hdul['PRIMARY'].data,
                      meta=hdul['PRIMARY'].header,
                      unit='adu')
    read_imgs += 1
    # check for other legal components
    if 'UNCERT' in hdul:
        ccddata.uncertainty = hdul['UNCERT'].data
        read_imgs += 1
    if 'FLAGS' in hdul:
        ccddata.flags = hdul['FLAGS'].data
        read_imgs += 1
    if 'MASK' in hdul:
        ccddata.mask = hdul['MASK'].data
        read_imgs += 1
    if 'Exposure Events' in hdul:
        table = hdul['Exposure Events']
        read_tabs += 1
    else:
        table = None
    # prepare for floating point
    ccddata.data = ccddata.data.astype(np.float64)
    # Check for CCDCFG keyword
    if 'CCDCFG' not in ccddata.header:
        ccdcfg = ccddata.header['CCDSUM'].replace(" ", "")
        ccdcfg += "%1d" % ccddata.header['CCDMODE']
        ccdcfg += "%02d" % ccddata.header['GAINMUL']
        ccdcfg += "%02d" % ccddata.header['AMPMNUM']
        ccddata.header['CCDCFG'] = ccdcfg

    if ccddata:
        if 'BUNIT' in ccddata.header:
            ccddata.unit = ccddata.header['BUNIT']
            if ccddata.uncertainty:
                ccddata.uncertainty.unit = ccddata.header['BUNIT']
            # print("setting image units to " + ccddata.header['BUNIT'])

    logger.info("<<< read %d imgs and %d tables out of %d hdus in %s" %
                (read_imgs, read_tabs, len(hdul), file))
    return ccddata, table
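
A hypothetical usage sketch; the file name is a placeholder for a frame carrying the KCWI header keywords the reader expects:

ccd, events = kcwi_fits_reader('kb230101_00042.fits')
print(ccd.unit, events is None)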
Example #30
def test_subtractBias():
    inFiles = readFileToArr(os.path.join(testPath, 'BIAS.list'))
    otFiles = [addSuffixToFileName(fileName, 'ot') for fileName in inFiles]
    outArrs = subtractOverscan(inFiles,
                               overscanSection,
                               trimSection=trimSection,
                               fitsFilesOut=otFiles,
                               overwrite=True)
    print('len(inFiles) = ', len(inFiles), ', len(outArrs) = ', len(outArrs))
    meanDiffs = []
    for iFile in np.arange(0, len(inFiles), 1):
        meanA = np.mean(CCDData.read(inFiles[iFile], unit="adu"))
        meanB = np.mean(outArrs[iFile])
        meanDiff = meanA - meanB
        meanDiffs.append(meanDiff)
        print('iFile = ', iFile, ': meanA - meanB = ', meanDiff)

    # create master bias
    masterBias = os.path.join(testPath, 'combinedBias_ot.fits')
    combinedImage = combine(otFiles,
                            combinerMethod='median',
                            clippingMethod='sigma',
                            clippingParameters={
                                'niter': 0,
                                'low_thresh': -3.,
                                'high_thresh': 3.,
                                'func': np.ma.median
                            },
                            scaling=False,
                            fitsOutName=masterBias)
    print('average sigma 0: mean(combinedImage) = ', np.mean(combinedImage))

    otzFiles = [addSuffixToFileName(fileName, 'otz') for fileName in inFiles]
    outArrs = subtractBias(otFiles,
                           masterBias,
                           fitsFilesOut=otzFiles,
                           overwrite=True)
    for iFile in np.arange(0, len(inFiles), 1):
        meanA = np.mean(CCDData.read(otFiles[iFile], unit="adu"))
        meanB = np.mean(CCDData.read(otzFiles[iFile], unit="adu"))
        meanDiff = meanA - meanB
        print('iFile = ', iFile, ': meanA - meanB = ', meanDiff,
              ': difference to previous calculation = ',
              meanDiff - meanDiffs[iFile])
Example #31
    def bias_subtract(self):
        """

        :return:
        """
        if self.binning is None:
            raise InputError('Binning not set.')
        if self.debug:
            print("Subtracting bias from remaining images...")

        # Refresh the ImageFileCollection
        self.icl.refresh()

        # Load the appropriate bias frame to subtract
        if not os.path.isfile(f'{self.path}/{self.zerofn}'):
            self._biascombine(binning=self.binning)
        try:
            combined_bias = CCDData.read(f'{self.path}/{self.zerofn}')
        except FileNotFoundError:
            # Just skip the bias subtraction
            print(f"Skipping bias subtraction for lack of {self.zerofn}")
            return

        # Set up a progress bar, so we can see how the process is going...
        prog_bar = tqdm(total=len(self.icl.files),
                        unit='frame',
                        unit_scale=False,
                        colour='blue')

        # Loop through files,
        for ccd, file_name in self.icl.ccds(ccdsum=self.binning,
                                            bitpix=16,
                                            return_fname=True):

            # Fit the overscan section, subtract it, then trim the image
            ccd = _trim_oscan(ccd, self.biassec, self.trimsec)

            # Subtract master bias
            ccd = ccdp.subtract_bias(ccd, combined_bias)

            # Update the header
            ccd.header['HISTORY'] = PKG_NAME
            ccd.header['HISTORY'] = 'Bias-subtracted image saved: ' + \
                                    _savetime()
            ccd.header['HISTORY'] = f'Subtracted bias: {self.zerofn}'
            ccd.header['HISTORY'] = f'Original filename: {file_name}'

            # Save the result (suffix = 'b'); delete input file
            ccd.write(f'{self.path}/{file_name[:-5]}b{file_name[-5:]}',
                      overwrite=True)
            os.remove(f'{self.path}/{file_name}')

            # Update the progress bar
            prog_bar.update(1)
        # Close the progress bar, end of loop
        prog_bar.close()
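
A standalone sketch of the underlying ccdproc.subtract_bias call used in the loop above (values are illustrative):

import numpy as np
import astropy.units as u
from astropy.nddata import CCDData
import ccdproc as ccdp

frame = CCDData(np.full((10, 10), 105.0), unit=u.adu)
master_bias = CCDData(np.full((10, 10), 100.0), unit=u.adu)
debiased = ccdp.subtract_bias(frame, master_bias)
print(debiased.data[0, 0])  # 5.0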
Example #32
def test_combine_average_ccddata():
    fitsfile = get_pkg_data_filename('data/a8280271.fits')
    ccd = CCDData.read(fitsfile, unit=u.adu)
    ccd_list = [ccd] * 3
    c = Combiner(ccd_list)
    ccd_by_combiner = c.average_combine()

    avgccd = combine(ccd_list, output_file=None, method='average', unit=u.adu)
    # averaging same ccdData should give back same images
    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data)
Example #33
def test_combiner_mask():
    data = np.zeros((10, 10))
    data[5, 5] = 1
    mask = (data == 0)
    ccd = CCDData(data, unit=u.adu, mask=mask)
    ccd_list = [ccd, ccd, ccd]
    c = Combiner(ccd_list)
    assert c.data_arr.shape == (3, 10, 10)
    assert c.data_arr.mask.shape == (3, 10, 10)
    assert not c.data_arr.mask[0, 5, 5]
Example #35
    def divide(self, other):
        '''Divide this Measurement by another, propagating errors and units,
        and updating identifiers

        :param other: the Measurement to divide by
        :type other: :class:`Measurement`
        '''
        z = CCDData.divide(self, other)
        z = Measurement(z, unit=z._unit)
        z._identifier = self.id + '/' + other.id
        return z
Example #36
    def multiply(self, other):
        '''Multiply this Measurement by another, propagating errors and units,
        and updating identifiers

        :param other: the Measurement to multiply by
        :type other: :class:`Measurement`
        '''
        z = CCDData.multiply(self, other)
        z = Measurement(z, unit=z._unit)
        z._identifier = self.id + '*' + other.id
        return z
Example #37
def test_flat_correct_norm_value_bad_value(ccd_data):
    # Test that flat_correct raises the appropriate error if
    # it is given a bad norm_value. Bad means <=0.

    # create the flat, with some scatter
    data = np.random.normal(loc=1.0, scale=0.05, size=ccd_data.shape)
    flat = CCDData(data, meta=fits.Header(), unit=ccd_data.unit)
    with pytest.raises(ValueError) as e:
        flat_correct(ccd_data, flat, add_keyword=None, norm_value=-7)
    assert "norm_value must be" in str(e)
Example #40
def test_combine_limitedmem_scale_fitsimages():
    fitsfile = get_pkg_data_filename('data/a8280271.fits')
    ccd = CCDData.read(fitsfile, unit=u.adu)
    ccd_list = [ccd] * 5
    c = Combiner(ccd_list)
    # scale each array to the mean of the first image
    scale_by_mean = lambda x: ccd.data.mean()/np.ma.average(x)
    c.scaling = scale_by_mean
    ccd_by_combiner = c.average_combine()

    fitsfilename_list = [fitsfile] * 5
    avgccd = combine(fitsfilename_list, output_file=None, method='average',
                     mem_limit=1e6, scale=scale_by_mean, unit=u.adu)

    np.testing.assert_array_almost_equal(
        avgccd.data, ccd_by_combiner.data, decimal=4)
Example #41
def test_combine_numpyndarray():
    """ Test of numpy ndarray implementation: #493

    Test the average combine using ``Combiner`` and ``combine`` with input
    ``img_list`` in the format of ``numpy.ndarray``.
    """
    fitsfile = get_pkg_data_filename('data/a8280271.fits')
    ccd = CCDData.read(fitsfile, unit=u.adu)
    ccd_list = [ccd] * 3
    c = Combiner(ccd_list)
    ccd_by_combiner = c.average_combine()

    fitsfilename_list = np.array([fitsfile] * 3)
    avgccd = combine(fitsfilename_list, output_file=None,
                     method='average', unit=u.adu)
    # averaging same fits images should give back same fits image
    np.testing.assert_array_almost_equal(avgccd.data, ccd_by_combiner.data)
Example #42
def combine(img_list, output_file=None,
            method='average', weights=None, scale=None, mem_limit=16e9,
            clip_extrema=False, nlow=1, nhigh=1,
            minmax_clip=False, minmax_clip_min=None, minmax_clip_max=None,
            sigma_clip=False,
            sigma_clip_low_thresh=3, sigma_clip_high_thresh=3,
            sigma_clip_func=ma.mean, sigma_clip_dev_func=ma.std,
            dtype=None, combine_uncertainty_function=None, **ccdkwargs):
    """
    Convenience function for combining multiple images.

    Parameters
    ----------
    img_list : `numpy.ndarray`, list or str
        A list of FITS filenames or `~astropy.nddata.CCDData` objects to be
        combined, or a single string of comma-separated FITS filenames.

    output_file : str or None, optional
        Optional output fits file-name to which the final output can be
        directly written.
        Default is ``None``.

    method : str, optional
        Method to combine images:

        - ``'average'`` : To combine by calculating the average.
        - ``'median'`` : To combine by calculating the median.
        - ``'sum'`` : To combine by calculating the sum.

        Default is ``'average'``.

    weights : `numpy.ndarray` or None, optional
        Weights to be used when combining images.
        An array with the weight values. The dimensions should match the
        dimensions of the data arrays being combined.
        Default is ``None``.

    scale : function or `numpy.ndarray`-like or None, optional
        Scaling factor to be used when combining images.
        Images are multiplied by scaling prior to combining them. Scaling
        may be either a function, which will be applied to each image
        to determine the scaling factor, or a list or array whose length
        is the number of images in the `Combiner`. Default is ``None``.

    mem_limit : float, optional
        Maximum memory which should be used while combining (in bytes).
        Default is ``16e9``.

    clip_extrema : bool, optional
        Set to True if you want to mask pixels using an IRAF-like minmax
        clipping algorithm.  The algorithm will mask the lowest nlow values and
        the highest nhigh values before combining the values to make up a
        single pixel in the resulting image.  For example, each pixel in the
        result will be a combination of Nimages - nlow - nhigh input values
        instead of all Nimages.
        Default is ``False``.

        Parameters below are valid only when clip_extrema is set to True,
        see :meth:`Combiner.clip_extrema` for the parameter description:

        - ``nlow`` : int or None, optional
        - ``nhigh`` : int or None, optional


    minmax_clip : bool, optional
        Set to True if you want to mask all pixels that are below
        minmax_clip_min or above minmax_clip_max before combining.
        Default is ``False``.

        Parameters below are valid only when minmax_clip is set to True, see
        :meth:`Combiner.minmax_clipping` for the parameter description:

        - ``minmax_clip_min`` : float or None, optional
        - ``minmax_clip_max`` : float or None, optional

    sigma_clip : bool, optional
        Set to True if you want to reject pixels whose deviations are greater
        than the threshold values. The algorithm first calculates a baseline
        value using the function given in sigma_clip_func and a deviation
        based on sigma_clip_dev_func and the input data array. Any pixel with
        a deviation from the baseline value greater than that set by
        sigma_clip_high_thresh, or lower than that set by
        sigma_clip_low_thresh, will be rejected.
        Default is ``False``.

        Parameters below are valid only when sigma_clip is set to True. See
        :meth:`Combiner.sigma_clipping` for the parameter description.

        - ``sigma_clip_low_thresh`` : positive float or None, optional
        - ``sigma_clip_high_thresh`` : positive float or None, optional
        - ``sigma_clip_func`` : function, optional
        - ``sigma_clip_dev_func`` : function, optional

    dtype : str or `numpy.dtype` or None, optional
        The intermediate and resulting ``dtype`` for the combined CCDs. See
        `ccdproc.Combiner`. If ``None`` this is set to ``float64``.
        Default is ``None``.

    combine_uncertainty_function : callable, None, optional
        If ``None`` use the default uncertainty func when using average, median or
        sum combine, otherwise use the function provided.
        Default is ``None``.

    ccdkwargs : Other keyword arguments for `astropy.nddata.fits_ccddata_reader`.

    Returns
    -------
    combined_image : `~astropy.nddata.CCDData`
        CCDData object based on the combined input of CCDData objects.
    """
    if not isinstance(img_list, list):
        # If not a list, check whether it is a numpy ndarray or string of
        # filenames separated by comma
        if isinstance(img_list, np.ndarray):
            img_list = img_list.tolist()
        elif isinstance(img_list, str) and (',' in img_list):
            img_list = img_list.split(',')
        else:
            raise ValueError(
                "unrecognised input for list of images to combine.")

    # Select Combine function to call in Combiner
    if method == 'average':
        combine_function = 'average_combine'
    elif method == 'median':
        combine_function = 'median_combine'
    elif method == 'sum':
        combine_function = 'sum_combine'
    else:
        raise ValueError("unrecognised combine method : {0}.".format(method))

    # First we create a CCDObject from first image for storing output
    if isinstance(img_list[0], CCDData):
        ccd = img_list[0].copy()
    else:
        # User has provided fits filenames to read from
        ccd = CCDData.read(img_list[0], **ccdkwargs)

    # If uncertainty_func is given for combine this will create an uncertainty
    # even if the originals did not have one. In that case we need to create
    # an empty placeholder.
    if ccd.uncertainty is None and combine_uncertainty_function is not None:
        ccd.uncertainty = StdDevUncertainty(np.zeros(ccd.data.shape))

    if dtype is None:
        dtype = np.float64

    # Convert the master image to the appropriate dtype so when overwriting it
    # later the data is not downcast and the memory consumption calculation
    # uses the internally used dtype instead of the original dtype. #391
    if ccd.data.dtype != dtype:
        ccd.data = ccd.data.astype(dtype)

    size_of_an_img = ccd.data.nbytes
    try:
        size_of_an_img += ccd.uncertainty.array.nbytes
    # In case uncertainty is None it has no "array" and in case the "array" is
    # not a numpy array:
    except AttributeError:
        pass
    # Mask is enforced to be a numpy.array across astropy versions
    if ccd.mask is not None:
        size_of_an_img += ccd.mask.nbytes
    # flags is not necessarily a numpy array so do not fail with an
    # AttributeError in case something was set!
    # TODO: Flags are not taken into account in Combiner. This number is added
    #       nevertheless for future compatibility.
    try:
        size_of_an_img += ccd.flags.nbytes
    except AttributeError:
        pass

    no_of_img = len(img_list)

    # determine the number of chunks to split the images into
    no_chunks = int((size_of_an_img * no_of_img) / mem_limit) + 1
    if no_chunks > 1:
        log.info('splitting each image into {0} chunks to limit memory usage '
                 'to {1} bytes.'.format(no_chunks, mem_limit))
    xs, ys = ccd.data.shape

    # Calculate strides for loop
    xstep, ystep = _calculate_step_sizes(xs, ys, no_chunks)

    # Dictionary of Combiner properties to set and methods to call before
    # combining
    to_set_in_combiner = {}
    to_call_in_combiner = {}

    # Define all the Combiner properties one wants to apply before combining
    # images
    if weights is not None:
        to_set_in_combiner['weights'] = weights

    if scale is not None:
        # If scale is a function, it needs to be applied to each full image
        # to obtain the scaling factors; store them as an array instead.
        if callable(scale):
            scalevalues = []
            for image in img_list:
                if isinstance(image, CCDData):
                    imgccd = image
                else:
                    imgccd = CCDData.read(image, **ccdkwargs)

                scalevalues.append(scale(imgccd.data))

            to_set_in_combiner['scaling'] = np.array(scalevalues)
        else:
            to_set_in_combiner['scaling'] = scale

    if clip_extrema:
        to_call_in_combiner['clip_extrema'] = {'nlow': nlow,
                                               'nhigh': nhigh}

    if minmax_clip:
        to_call_in_combiner['minmax_clipping'] = {'min_clip': minmax_clip_min,
                                                  'max_clip': minmax_clip_max}

    if sigma_clip:
        to_call_in_combiner['sigma_clipping'] = {
            'low_thresh': sigma_clip_low_thresh,
            'high_thresh': sigma_clip_high_thresh,
            'func': sigma_clip_func,
            'dev_func': sigma_clip_dev_func}

    # Finally, run the chosen combine method on all the subsections of the
    # image and write the final stitched image back to ccd
    for x in range(0, xs, xstep):
        for y in range(0, ys, ystep):
            xend, yend = min(xs, x + xstep), min(ys, y + ystep)
            ccd_list = []
            for image in img_list:
                if isinstance(image, CCDData):
                    imgccd = image
                else:
                    imgccd = CCDData.read(image, **ccdkwargs)

                # Trim image and copy
                # The copy is *essential* to avoid having a bunch
                # of unused file references around if the files
                # are memory-mapped. See this PR for details
                # https://github.com/astropy/ccdproc/pull/630
                ccd_list.append(imgccd[x:xend, y:yend].copy())

            # Create Combiner for tile
            tile_combiner = Combiner(ccd_list, dtype=dtype)

            # Set all properties and call all methods
            for to_set in to_set_in_combiner:
                setattr(tile_combiner, to_set, to_set_in_combiner[to_set])
            for to_call in to_call_in_combiner:
                getattr(tile_combiner, to_call)(**to_call_in_combiner[to_call])

            # Finally call the combine algorithm
            combine_kwds = {}
            if combine_uncertainty_function is not None:
                combine_kwds['uncertainty_func'] = combine_uncertainty_function

            comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds)

            # add it back into the master image
            ccd.data[x:xend, y:yend] = comb_tile.data
            if ccd.mask is not None:
                ccd.mask[x:xend, y:yend] = comb_tile.mask
            if ccd.uncertainty is not None:
                ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array
            # Free up memory to try to stay under user's limit
            del comb_tile
            del tile_combiner
            del ccd_list

    # Write fits file if filename was provided
    if output_file is not None:
        ccd.write(output_file)

    return ccd
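
# A minimal usage sketch of the function above, assuming it is ccdproc's
# combine function; the FITS file names are hypothetical, and unit='adu' is
# forwarded to CCDData.read through ccdkwargs.
from ccdproc import combine

stacked = combine(['bias1.fits', 'bias2.fits', 'bias3.fits'],
                  method='average',
                  sigma_clip=True,
                  sigma_clip_low_thresh=3,
                  sigma_clip_high_thresh=3,
                  mem_limit=1e9,  # bytes; forces tile-by-tile combination
                  unit='adu')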
Exemplo n.º 43
0
    def median_combine(self, median_func=ma.median, scale_to=None,
                       uncertainty_func=sigma_func):
        """
        Median combine a set of arrays.

        A `~astropy.nddata.CCDData` object is returned with the data property set to
        the median of the arrays. If the data was masked or any data have been
        rejected, those pixels will not be included in the median. A mask will
        be returned, and if a pixel has been rejected in all images, it will be
        masked. The uncertainty of the combined image is set by 1.4826 times
        the median absolute deviation of all input images.

        Parameters
        ----------
        median_func : function, optional
            Function that calculates median of a `numpy.ma.MaskedArray`.
            Default is `numpy.ma.median`.

        scale_to : float or None, optional
            Scaling factor used in the median combined image. If given,
            it overrides `scaling`.
            Defaults to None.

        uncertainty_func : function, optional
            Function to calculate uncertainty.
            Default is `~ccdproc.sigma_func`.

        Returns
        -------
        combined_image: `~astropy.nddata.CCDData`
            CCDData object based on the combined input of CCDData objects.

        Warnings
        --------
        The uncertainty currently calculated using the median absolute
        deviation does not account for rejected pixels.
        """
        # set the data
        data = median_func(self._get_scaled_data(scale_to), axis=0)

        # set the mask
        masked_values = self.data_arr.mask.sum(axis=0)
        mask = (masked_values == len(self.data_arr))

        # set the uncertainty
        uncertainty = uncertainty_func(self.data_arr, axis=0)
        # Divide the uncertainty by the square root of the number of images
        # contributing to each pixel (#309)
        uncertainty /= np.sqrt(len(self.data_arr) - masked_values)
        # Convert uncertainty to plain numpy array (#351)
        # There is no need to care about potential masks because the
        # uncertainty was calculated based on the data so potential masked
        # elements are also masked in the data. No need to keep two identical
        # masks.
        uncertainty = np.asarray(uncertainty)

        # create the combined image with a dtype matching the combiner
        combined_image = CCDData(np.asarray(data.data, dtype=self.dtype),
                                 mask=mask, unit=self.unit,
                                 uncertainty=StdDevUncertainty(uncertainty))

        # update the meta data
        combined_image.meta['NCOMBINE'] = len(self.data_arr)

        # return the combined image
        return combined_image
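
# A short sketch of calling median_combine directly, assuming this method
# lives on ccdproc.Combiner; the five input frames are synthetic.
import numpy as np
from astropy import units as u
from astropy.nddata import CCDData
from ccdproc import Combiner

frames = [CCDData(np.random.normal(loc=100.0, scale=5.0, size=(32, 32)),
                  unit=u.adu) for _ in range(5)]
combiner = Combiner(frames)
combiner.sigma_clipping(low_thresh=3, high_thresh=3)  # optional rejection
median_image = combiner.median_combine()
print(median_image.meta['NCOMBINE'])  # 5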
Exemplo n.º 44
0
# imports required by the rest of this fragment
import glob
import ccdproc
from astropy.nddata import CCDData
from astropy.modeling import models

parser.add_argument('--irafbiassec', dest='irafbiassec', default='[4100:4150, 1:4150]', help='biassec in IRAF notation. Default is [4100:4150, 1:4150], which applies to the HDI camera')
parser.add_argument('--iraftrimsec', dest='iraftrimsec', default='[1:4095, 1:4109]', help='trimsec in IRAF notation. Default is [1:4095, 1:4109], which applies to the HDI camera')
#parser.add_argument('--gain', dest='gain', default=1.3, help='gain in e-/ADU. Default is 1.3, which applies to the HDI camera')
#parser.add_argument('--rdnoise', dest='rdnoise', default=7.3, help='read noise in e-. Default is 7.3, which applies to the HDI camera')


args = parser.parse_args()
files = sorted(glob.glob(args.filestring + '*.fits'))
nfiles = len(files)

poly_model = models.Polynomial1D(1)

for f in files:
    # read in the image, converting it to the CCDData format that ccdproc
    # expects and keeping the header
    print('working on', f)
    ccd = CCDData.read(f, unit='adu')

    # subtract overscan
    o_subtracted = ccdproc.subtract_overscan(ccd, fits_section=args.irafbiassec, model=poly_model)
    o_subtracted.header['HISTORY'] = 'overscan subtracted ' + args.irafbiassec

    # trim image
    trimmed = ccdproc.trim_image(o_subtracted, fits_section=args.iraftrimsec)

    # record the trim section in the header and write out the result
    trimmed.header['CCDSEC'] = args.iraftrimsec
    trimmed.write('tr' + f)
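
    # If the commented-out --gain/--rdnoise options above were restored, a
    # hedged continuation could propagate them with ccdproc's
    # create_deviation and gain_correct (1.3 e-/ADU and 7.3 e- are the HDI
    # values quoted in those comments):
    #
    #     from astropy import units as u
    #     with_dev = ccdproc.create_deviation(trimmed,
    #                                         gain=1.3 * u.electron / u.adu,
    #                                         readnoise=7.3 * u.electron)
    #     gained = ccdproc.gain_correct(with_dev, gain=1.3 * u.electron / u.adu)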