Example #1
    def correct_zero(data, header, prefix, zero_file):
        """
        Subtract the master zero (bias) frame from data and record it in the header.

        Args:

            data (numpy.ndarray) : A 2D numpy array that contains the data.

            header (astropy.io.fits.Header) : A header that will be updated.

            prefix (str) : File prefix that is extended after each reduction step.

            zero_file (str | None) : Master Bias filename. If None is given,
            nothing is done.

        """
        from os.path import abspath

        if zero_file is not None:

            zero = _pyfits.open(abspath(zero_file))[0]   # primary HDU of the master bias
            data = data - zero.data                      # subtract the bias level
            header['BIASFILE'] = zero_file               # record which bias was used
            prefix = 'z' + prefix                        # mark the frame as bias-corrected

        return data, header, prefix
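
A minimal call sketch, treating `correct_zero` as the free function shown above and assuming `_pyfits` is `astropy.io.fits` imported under that alias; the master-bias file name is hypothetical and must exist on disk:

    import numpy as np
    from astropy.io import fits as _pyfits

    data = np.ones((512, 512)) * 100.0   # placeholder science frame
    header = _pyfits.Header()            # will receive the BIASFILE keyword
    data, header, prefix = correct_zero(
        data, header, prefix='', zero_file='master_zero.fits')
    # prefix is now 'z' and header['BIASFILE'] records the bias that was used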
Example #2
    def correct_flat(data, header, prefix, flat_file):
        """
        Divide the image by the master flat and record the flat file in the header.

        Args:

            data (numpy.ndarray) : A 2D numpy array that contains the data.

            header (astropy.io.fits.Header) : A header that will be updated.

            prefix (str) : File prefix that is extended after each reduction step.

            flat_file (str or None) : Master flat filename. If None is given,
            nothing is done.
        """
        if not isinstance(prefix, str):
            raise TypeError(
                'Expected string but found %s instead.' % prefix.__class__)

        if flat_file is not None:
            flat = _pyfits.open(flat_file)[0]

            data = data / flat.data  # out of place: in-place division fails on integer data
            header['FLATFILE'] = flat_file
            prefix = 'f' + prefix

        return data, header, prefix
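
The division above is written out of place because numpy refuses an in-place true division on integer arrays, which raw CCD frames often are. A small numpy-only sketch of the hazard:

    import numpy as np

    counts = np.array([[1000, 2000]], dtype=np.uint16)  # raw CCD-like integers
    flat = np.array([[0.9, 1.1]])

    # counts /= flat       # TypeError: float64 result cannot be cast to uint16
    counts = counts / flat  # out-of-place division promotes to float64 instead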
Example #3
    def correct_dark(data, header, prefix, dark_file=None):
        """
        Subtract the scaled master dark from data and record it in the header.

        Args:

            data (numpy.ndarray) : A 2D numpy array that contains the data.

            header (astropy.io.fits.Header) : A header that will be updated.

            prefix (str) : File prefix that is extended after each reduction step.

            dark_file (str | None) : Master Dark filename. If None is given,
            nothing is done.
        """

        if not isinstance(prefix, str):
            raise TypeError(
                'Expected string but found %s instead.' % prefix.__class__)

        if dark_file is not None:

            dark = _pyfits.open(dark_file)[0]
            dark.data = dark.data / float(dark.header['EXPTIME'])  # counts per second

            data = data - dark.data * header['EXPTIME']  # rescale to science exposure
            header['DARKFILE'] = dark_file
            prefix = 'd' + prefix

        return data, header, prefix
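
The master dark is first normalised to a count rate and then rescaled to the science exposure time, so the two exposures need not match. A worked sketch of the scaling with made-up numbers:

    # Master dark: 60 s exposure, 120 counts in some pixel.
    dark_counts, dark_exptime = 120.0, 60.0
    rate = dark_counts / dark_exptime      # 2.0 counts per second

    # Science frame: 30 s exposure, so the dark contribution removed is
    science_exptime = 30.0
    subtracted = rate * science_exptime    # 60.0 counts for that pixel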
Example #4
def build_table(list_of_files):
    """
    Return a pandas.DataFrame used to organize the pipeline.

    Args:
        list_of_files (list) : list of files to be included in the dataframe.

    Returns:
        table (pandas.DataFrame) : a dataframe with information needed for the
        pipeline.
    """
    log.info('Reading raw files')

    table = pd.DataFrame(
        columns=[
            'filename',
            'instrume',
            'obstype',
            'filters',
            'filter1',
            'filter2',
            'binning',
            'dark_file',
            'flat_file',
            'zero_file',
        ]
    )

    list_of_files.sort()
    for _file in list_of_files:

        try:
            hdu = pyfits.open(_file)
        except OSError:
            log.warning("Could not read file: {}".format(_file))
            continue

        if numpy.std(hdu[1].data.ravel()) == 0:
            log.warning("Bad data found on file: {}".format(_file))
            continue

        row = pd.Series(data={
            'filename': _file,
            'obstype': hdu[0].header['obstype'],
            'instrume': hdu[0].header['instrume'].strip().upper(),
            'filters': hdu[0].header['filters'],
            'filter1': hdu[0].header['filter1'],
            'filter2': hdu[0].header['filter2'],
            'binning': hdu[1].header['ccdsum'].strip(),
            'dark_file': None,
            'flat_file': None,
            'zero_file': None,
        })

        # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame
        table = pd.concat([table, row.to_frame().T], ignore_index=True, sort=True)

    return table
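
A sketch of feeding `build_table` from a directory listing; the glob pattern is hypothetical, and `pd`, `pyfits`, `numpy` and `log` are assumed to be the module-level imports the function already relies on:

    import glob

    raw_files = glob.glob('/data/raw/*.fits')  # hypothetical raw-data directory
    table = build_table(raw_files)
    print(table[['filename', 'obstype', 'binning']].head())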
Example #5
    def get_header(self, hdu_source):
        """
        Return the primary HDU header of a FITS file, updated with the units,
        binning, and amplifier sections read from the first extension.

        Args:

            hdu_source (str or astropy.io.fits.HDUList) : an HDUList or the
            name of the file that contains one.
        """
        from os.path import exists

        if isinstance(hdu_source, str):

            if not exists(hdu_source):
                raise IOError('%s file not found.' % hdu_source)

            hdu_source = _pyfits.open(hdu_source)

        h0 = hdu_source[0].header
        h1 = hdu_source[1].header

        # Header.set creates the UNITS card if it does not exist yet
        h0.set('UNITS', value='ADU', comment='Pixel intensity units.')

        # Save the CCD binning in the main header
        h0['CCDSUM'] = h1['CCDSUM']
        h0['DETSEC'] = h1['DETSEC']

        # Save the area that corresponds to each amplifier
        bin_size = _np.array(h0['CCDSUM'].split(), dtype=int)

        dx, dy = slices.iraf2python(h0['DETSEC'])
        dx, dy = dx // bin_size[0], dy // bin_size[1]

        h0['AMP_SEC1'] = slices.python2iraf(dx[0], dx[1], dy[0], dy[1])

        h0['AMP_SEC2'] = slices.python2iraf(dx[0] + dx[1], dx[1] + dx[1],
                                            dy[0], dy[1])

        h0['AMP_SEC3'] = slices.python2iraf(dx[0], dx[1], dy[0] + dy[1],
                                            dy[1] + dy[1])

        h0['AMP_SEC4'] = slices.python2iraf(dx[0] + dx[1], dx[1] + dx[1],
                                            dy[0] + dy[1], dy[1] + dy[1])

        return h0
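
A worked sketch of the amplifier-section arithmetic above, assuming (for illustration only) that `slices.iraf2python` returns zero-based `(start, end)` pairs and that amplifier 1 reads a 1024 x 1024 unbinned region:

    # Suppose DETSEC = '[1:1024,1:1024]' and CCDSUM = '2 2' (2x2 binning).
    dx, dy = (0, 1024), (0, 1024)    # assumed iraf2python output for DETSEC
    dx = (dx[0] // 2, dx[1] // 2)    # (0, 512) after binning in x
    dy = (dy[0] // 2, dy[1] // 2)    # (0, 512) after binning in y
    # The four AMP_SEC keywords then tile the mosaic quadrant by quadrant:
    #   AMP_SEC1: x in (0, 512),    y in (0, 512)
    #   AMP_SEC2: x in (512, 1024), y in (0, 512)
    #   AMP_SEC3: x in (0, 512),    y in (512, 1024)
    #   AMP_SEC4: x in (512, 1024), y in (512, 1024)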
Example #6
def process_object_files(df, red_path):
    """
    Args:

        df (pandas.DataFrame) : a data-frame containing the all the data being
        processed.

        red_path (str) : the path where the reduced data is stored.

    Returns:

        updated_table (pandas.DataFrame) : an updated data-frame where each
        file now is attached to the corresponding master Zero file.
    """
    sami_pipeline = reduce.SamiReducer()
    sami_pipeline.cosmic_rays = True

    log.info('Processing OBJECT files.')

    object_df = df.loc[df.obstype.values == 'OBJECT']

    for index, row in object_df.iterrows():

        sami_pipeline.zero_file = row.zero_file
        sami_pipeline.dark_file = row.dark_file
        sami_pipeline.flat_file = row.flat_file
        obj_file = row.filename

        path, fname = os.path.split(obj_file)
        prefix = sami_pipeline.get_prefix()
        output_obj_file = os.path.join(red_path, prefix + fname)

        if os.path.exists(output_obj_file):
            log.warning(
                'Skipping existing OBJECT file: {}'.format(output_obj_file))
            continue

        log.info('Processing OBJECT file: {}'.format(obj_file))

        hdul = pyfits.open(obj_file)
        data, header, prefix = sami_pipeline.reduce(hdul)
        pyfits.writeto(output_obj_file, data, header)

    return df
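
The output prefix comes from the letters that each `correct_*` stage prepends ('z' for zero, 'd' for dark, 'f' for flat, as in the earlier examples). A sketch of the accumulation, treating the stages as the free functions shown above; the master-file names are hypothetical and must exist for the calls to succeed:

    import numpy as np
    from astropy.io import fits as _pyfits

    data = np.zeros((10, 10), dtype=float)
    header = _pyfits.Header()
    header['EXPTIME'] = 30.0   # correct_dark reads this keyword

    prefix = ''
    data, header, prefix = correct_zero(data, header, prefix, 'master_zero.fits')
    data, header, prefix = correct_dark(data, header, prefix, 'master_dark.fits')
    data, header, prefix = correct_flat(data, header, prefix, 'master_flat.fits')
    print(prefix)   # 'fdz': zero applied first, dark second, flat last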
Example #7
def process_zero_files(df, red_path):
    """
    Args:
        df (pandas.DataFrame) : a data-frame containing the all the data being
        processed.
        red_path (str) : the path where the reduced data is stored.

    Returns:
        updated_table (pandas.DataFrame) : an updated data-frame where each
        file now is attached to the corresponding master Zero file.
    """
    sami_pipeline = reduce.SamiReducer()
    sami_pipeline.cosmic_rays = True

    binning = df.binning.unique()

    log.debug('Binning formats found in data: ')
    for b in binning:
        log.debug('   {:s}'.format(b))

    for b in binning:

        bx, by = b.split(' ')
        log.info('Processing ZERO files with binning: {} x {}'.format(bx, by))

        mask1 = df.obstype.values == 'ZERO'
        mask2 = df.binning.values == b

        zero_table = df.loc[mask1 & mask2]
        zero_table = zero_table.sort_values('filename')

        zero_list = []
        for index, row in zero_table.iterrows():

            sami_pipeline.zero_file = None
            sami_pipeline.flat_file = None
            zero_file = row.filename

            path, fname = os.path.split(zero_file)
            prefix = sami_pipeline.get_prefix()
            output_zero_file = os.path.join(red_path, prefix + fname)

            zero_list.append(prefix + fname)

            if os.path.exists(output_zero_file):
                log.warning('Skipping existing file: {}'.format(
                    output_zero_file))
                continue

            log.info('Processing ZERO file: {}'.format(zero_file))

            hdul = pyfits.open(zero_file)
            data, header, prefix = sami_pipeline.reduce(hdul)
            pyfits.writeto(output_zero_file, data, header)

        if len(zero_list) == 0:
            continue

        zero_list_name = os.path.join(red_path, "0Zero{}x{}".format(bx, by))

        with open(zero_list_name, 'w') as zero_list_buffer:
            for zero_file in zero_list:
                zero_list_buffer.write('{:s}\n'.format(zero_file))

        log.info('Combining ZERO files.')
        master_zero = zero_list_name + '.fits'

        if os.path.exists(master_zero):
            log.warning(
                'Skipping existing MASTER ZERO: {:s}'.format(master_zero))

        else:

            log.info("Writing master zero to: {}".format(master_zero))
            zero_combine_files = [os.path.join(red_path, f) for f in zero_list]

            zero_combine = combine.ZeroCombine(input_list=zero_combine_files,
                                               output_file=master_zero)
            zero_combine.run()
            log.info('Done.')

        mask1 = df['obstype'].values != 'ZERO'
        mask2 = df['binning'].values == b
        df.loc[mask1 & mask2, 'zero_file'] = master_zero

    return df
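
The closing boolean-mask assignment is what attaches the master zero to every non-ZERO frame that shares the binning. A minimal pandas sketch of the same pattern:

    import pandas as pd

    df = pd.DataFrame({'obstype': ['ZERO', 'OBJECT', 'OBJECT'],
                       'binning': ['2 2', '2 2', '4 4'],
                       'zero_file': [None, None, None]})

    mask = (df['obstype'].values != 'ZERO') & (df['binning'].values == '2 2')
    df.loc[mask, 'zero_file'] = '0Zero2x2.fits'  # only the matching OBJECT row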
Example #8
def process_flat_files(df, red_path):
    """
    Args:
        df (pandas.DataFrame) : a data-frame containing the all the data being
        processed.
        red_path (str) : the path where the reduced data is stored.

    Returns:
        updated_table (pandas.DataFrame) : an updated data-frame where each
        file now is attached to the corresponding master Zero file.
    """
    log.info('Processing FLAT files (SFLAT + DFLAT)')
    sami_pipeline = reduce.SamiReducer()

    binning = df.binning.unique()

    for b in binning:

        bx, by = b.split(' ')
        log.info('Processing FLAT files with binning: {} x {}'.format(bx, by))

        mask1 = (df.obstype.values == 'SFLAT') | (df.obstype.values == 'DFLAT')
        mask2 = df.binning.values == b
        flat_df = df.loc[mask1 & mask2]

        filters_used = flat_df.filters.unique()

        for _filter in filters_used:

            log.info('Processing FLATs for filter: {}'.format(_filter))

            filter_flat_df = flat_df.loc[flat_df.filters.values == _filter]
            filter_flat_df = filter_flat_df.sort_values('filename')

            log.info(
                'Filter Wheel 1: {}'.format(filter_flat_df.filter1.unique()[0]))

            log.info(
                'Filter Wheel 2: {}'.format(filter_flat_df.filter2.unique()[0]))

            flat_list = []
            for index, row in filter_flat_df.iterrows():

                sami_pipeline.zero_file = row.zero_file
                sami_pipeline.dark_file = row.dark_file
                sami_pipeline.flat_file = None
                flat_file = row.filename
                prefix = sami_pipeline.get_prefix()

                path, fname = os.path.split(flat_file)
                output_flat = os.path.join(red_path, prefix + fname)
                flat_list.append(prefix + fname)

                if os.path.exists(output_flat):
                    log.warning(
                        'Skipping existing FLAT file: {}'.format(output_flat))
                    continue

                log.info('Processing FLAT file: {}'.format(flat_file))

                hdul = pyfits.open(flat_file)
                data, header, prefix = sami_pipeline.reduce(hdul)
                pyfits.writeto(output_flat, data, header)

            if len(flat_list) == 0:
                continue

            flat_list_name = os.path.join(
                red_path, "1FLAT_{}x{}_{}".format(bx, by, _filter))

            with open(flat_list_name, 'w') as flat_list_buffer:
                for flat_file in flat_list:
                    flat_list_buffer.write('{:s}\n'.format(flat_file))

            master_flat = flat_list_name + '.fits'

            if os.path.exists(master_flat):
                log.warning(
                    'Skipping existing MASTER FLAT: {:s}'.format(master_flat))
            else:
                log.info('Writing master FLAT to file: {}'.format(master_flat))

                flat_combine_files = [
                    os.path.join(red_path, f) for f in flat_list]

                flat_combine = combine.FlatCombine(
                    input_list=flat_combine_files, output_file=master_flat)

                flat_combine.run()

            mask1 = df['obstype'].values == 'OBJECT'
            mask2 = df['binning'].values == b
            mask3 = df['filters'].values == _filter
            df.loc[mask1 & mask2 & mask3, 'flat_file'] = master_flat

    return df
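
Taken together, the four driver functions form the pipeline. A sketch of the intended ordering, with hypothetical paths; the OBJECT step must run last so that the `zero_file` and `flat_file` columns are already filled in:

    import glob

    table = build_table(glob.glob('/data/raw/*.fits'))  # hypothetical raw path
    table = process_zero_files(table, '/data/red')      # fills 'zero_file'
    table = process_flat_files(table, '/data/red')      # fills 'flat_file'
    table = process_object_files(table, '/data/red')    # reduces science frames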