Example #1
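All five snippets come from the same pipeline module and share a handful of module-level names. A minimal sketch of the imports they assume (the package providing `reduce` and `combine` is pipeline-internal and its import path is an assumption, so it is left commented out):

import logging
import os

from astropy.io import fits as pyfits  # assumption: astropy stands in for the retired pyfits package

# Pipeline-internal modules (assumed names) providing SoiReducer,
# SamiReducer, ZeroCombine, FlatCombine and DarkCombine:
# from <pipeline package> import combine, reduce

log = logging.getLogger(__name__)
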
def process_object_files(df, red_path):
    """
    Args:
        df (pandas.DataFrame) : a data-frame containing all the data being
        processed.
        red_path (str) : the path where the reduced data is stored.

    Returns:
        df (pandas.DataFrame) : the input data-frame, returned after every
        OBJECT file has been reduced and written to disk.
    """
    soi_merger = reduce.SoiReducer()
    soi_merger.cosmic_rays = True
    soi_merger.clean = True

    log.info('Processing OBJECT files.')

    object_df = df.loc[df.obstype.values == 'OBJECT']

    for index, row in object_df.iterrows():

        soi_merger.zero_file = row.zero_file
        soi_merger.flat_file = row.flat_file
        obj_file = row.filename

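        # Note: this variant ignores `red_path` and writes into a 'RED'
        # sub-folder next to the input file (see `output_obj_file` below).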
        path, fname = os.path.split(obj_file)
        prefix = soi_merger.get_prefix()
        output_obj_file = os.path.join(path, 'RED', prefix + fname)

        if os.path.exists(output_obj_file):
            log.warning(
                'Skipping existing OBJECT file: {}'.format(output_obj_file))
            continue

        log.info('Processing OBJECT file: {}'.format(obj_file))

        d = soi_merger.merge(obj_file)
        h = soi_merger.get_header(obj_file)
        h = soi_merger.create_wcs(d, h)

        # `__reduce` is name-mangled inside the class body, so it cannot be
        # reached as `soi_merger.__reduce` from here; use the mangled name.
        d, h, p = soi_merger._SoiReducer__reduce(d, h)

        pyfits.writeto(output_obj_file, d, h)

    return df
Example #2
def process_object_files(df, red_path):
    """
    Args:

        df (pandas.DataFrame) : a data-frame containing all the data being
        processed.

        red_path (str) : the path where the reduced data is stored.

    Returns:

        df (pandas.DataFrame) : the input data-frame, returned after every
        OBJECT file has been reduced and written to disk.
    """
    sami_pipeline = reduce.SamiReducer()
    sami_pipeline.cosmic_rays = True

    log.info('Processing OBJECT files.')

    object_df = df.loc[df.obstype.values == 'OBJECT']

    for index, row in object_df.iterrows():

        sami_pipeline.zero_file = row.zero_file
        sami_pipeline.dark_file = row.dark_file
        sami_pipeline.flat_file = row.flat_file
        obj_file = row.filename

        path, fname = os.path.split(obj_file)
        prefix = sami_pipeline.get_prefix()
        output_obj_file = os.path.join(red_path, prefix + fname)

        if os.path.exists(output_obj_file):
            log.warning(
                'Skipping existing OBJECT file: {}'.format(output_obj_file))
            continue

        log.info('Processing OBJECT file: {}'.format(obj_file))

        # Use a context manager so the FITS file is closed after reduction.
        with pyfits.open(obj_file) as hdul:
            data, header, prefix = sami_pipeline.reduce(hdul)

        pyfits.writeto(output_obj_file, data, header)

    return df
Example #3
def process_zero_files(df, red_path):
    """
    Args:
        df (pandas.DataFrame) : a data-frame containing all the data being
        processed.
        red_path (str) : the path where the reduced data is stored.

    Returns:
        updated_table (pandas.DataFrame) : an updated data-frame where each
        file is now attached to the corresponding master Zero file.
    """
    soi_merger = reduce.SoiReducer()

    binning = df.binning.unique()

    log.debug('Binning formats found in data: ')
    for b in binning:
        log.debug('   {:s}'.format(b))

    for b in binning:

        bx, by = b.split(' ')
        log.info('Processing ZERO files with binning: {} x {}'.format(bx, by))

        mask1 = df.obstype.values == 'ZERO'
        mask2 = df.binning.values == b

        zero_table = df.loc[mask1 & mask2]
        zero_table = zero_table.sort_values('filename')

        zero_list = []
        for index, row in zero_table.iterrows():

            soi_merger.zero_file = None
            soi_merger.flat_file = None
            soi_merger.clean = True
            zero_file = row.filename

            path, fname = os.path.split(zero_file)
            prefix = soi_merger.get_prefix()
            output_zero_file = os.path.join(red_path, prefix + fname)

            zero_list.append(prefix + fname)

            if os.path.exists(output_zero_file):
                log.warning(
                    'Skipping existing file: {}'.format(output_zero_file))
                continue

            log.info('Processing ZERO file: {}'.format(zero_file))

            data = soi_merger.merge(zero_file)
            header = soi_merger.get_header(zero_file)

            log.debug('Data format: {0[0]:d} x {0[1]:d}'.format(data.shape))

            # `__reduce` is name-mangled inside the class body; call it
            # through the mangled name (Python cannot resolve it as written).
            data, header, prefix = soi_merger._SoiReducer__reduce(data, header)
            pyfits.writeto(output_zero_file, data, header)

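        # Skip this binning if no ZERO frames were found (mirrors the
        # empty-list guard in process_dark_files below).
        if len(zero_list) == 0:
            continue
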
        zero_list_name = os.path.join(red_path, "0Zero{}x{}".format(bx, by))

        with open(zero_list_name, 'w') as zero_list_buffer:
            for zero_file in zero_list:
                zero_list_buffer.write('{:s}\n'.format(zero_file))

        log.info('Combining ZERO files.')
        master_zero = zero_list_name + '.fits'

        if os.path.exists(master_zero):
            log.warning(
                'Skipping existing MASTER ZERO: {:s}'.format(master_zero))

        else:

            log.info("Writing master zero to: {}".format(master_zero))
            zero_combine_files = [os.path.join(red_path, f) for f in zero_list]

            zero_combine = combine.ZeroCombine(input_list=zero_combine_files,
                                               output_file=master_zero)
            zero_combine.run()
            log.info('Done.')

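        # Attach the master zero to every non-ZERO frame of this binning,
        # so the later FLAT/OBJECT steps can pick it up from the row.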
        mask1 = df['obstype'].values != 'ZERO'
        mask2 = df['binning'].values == b
        df.loc[mask1 & mask2, 'zero_file'] = master_zero

    return df
Example #4
def process_flat_files(df, red_path):
    """
    Args:
        df (pandas.DataFrame) : a data-frame containing all the data being
        processed.
        red_path (str) : the path where the reduced data is stored.

    Returns:
        updated_table (pandas.DataFrame) : an updated data-frame where each
        OBJECT file is now attached to the corresponding master Flat file.
    """
    log.info('Processing FLAT files (SFLAT + DFLAT)')
    soi_merger = reduce.SoiReducer()
    soi_merger.clean = True

    binning = df.binning.unique()

    for b in binning:

        bx, by = b.split(' ')
        log.info('Processing FLAT files with binning: {} x {}'.format(bx, by))

        mask1 = (df.obstype.values == 'SFLAT') | (df.obstype.values == 'DFLAT')
        mask2 = df.binning.values == b
        flat_df = df.loc[mask1 & mask2]

        filters_used = flat_df.filters.unique()

        for _filter in filters_used:

            log.info('Processing FLATs for filter: {}'.format(_filter))

            filter_flat_df = flat_df.loc[flat_df.filters.values == _filter]
            # sort_values returns a new frame; assign it back or it is lost.
            filter_flat_df = filter_flat_df.sort_values('filename')

            log.info('Filter Wheel 1: {}'.format(
                filter_flat_df.filter1.unique()[0]))

            log.info('Filter Wheel 2: {}'.format(
                filter_flat_df.filter2.unique()[0]))

            flat_list = []
            for index, row in filter_flat_df.iterrows():

                soi_merger.zero_file = row.zero_file
                soi_merger.flat_file = None
                flat_file = row.filename
                prefix = soi_merger.get_prefix()

                path, fname = os.path.split(flat_file)
                output_flat = os.path.join(red_path, prefix + fname)
                flat_list.append(prefix + fname)

                if os.path.exists(output_flat):
                    log.warning(
                        'Skipping existing FLAT file: {}'.format(output_flat))
                    continue

                log.info('Processing FLAT file: {}'.format(flat_file))

                d = soi_merger.merge(flat_file)
                h = soi_merger.get_header(flat_file)

                # `__reduce` is name-mangled; call it through the mangled name.
                d, h, p = soi_merger._SoiReducer__reduce(d, h)
                pyfits.writeto(output_flat, d, h)

            flat_list_name = os.path.join(
                red_path, "1FLAT_{}x{}_{}".format(bx, by, _filter))

            with open(flat_list_name, 'w') as flat_list_buffer:
                for flat_file in flat_list:
                    flat_list_buffer.write('{:s}\n'.format(flat_file))

            master_flat = flat_list_name + '.fits'

            if os.path.exists(master_flat):
                log.warning(
                    'Skipping existing MASTER FLAT: {:s}'.format(master_flat))
            else:
                log.info('Writing master FLAT to file: {}'.format(master_flat))

                # Combine only when the master flat does not already exist,
                # mirroring the master-ZERO logic in process_zero_files.
                flat_combine_files = [
                    os.path.join(red_path, f) for f in flat_list]

                flat_combine = combine.FlatCombine(
                    input_list=flat_combine_files,
                    output_file=master_flat)

                flat_combine.run()

            # Match OBJECT frames by binning *and* filter; without the
            # filter mask each pass of the loop would overwrite the last.
            mask1 = df['obstype'].values == 'OBJECT'
            mask2 = df['binning'].values == b
            mask3 = df['filters'].values == _filter
            df.loc[mask1 & mask2 & mask3, 'flat_file'] = master_flat

    return df
Example #5
def process_dark_files(df, red_path):
    """
    Args:

        df (pandas.DataFrame) : a data-frame containing all the data being
        processed.

        red_path (str) : the path where the reduced data is stored.

    Returns:

        updated_table (pandas.DataFrame) : an updated data-frame where each
        file is now attached to the corresponding master Dark file.

    """
    sami_pipeline = reduce.SamiReducer()

    binning = df.binning.unique()

    for b in binning:

        bx, by = b.split(' ')
        log.info('Processing DARK files with binning: {} x {}'.format(bx, by))

        mask1 = df.obstype.values == 'DARK'
        mask2 = df.binning.values == b

        dark_table = df.loc[mask1 & mask2]
        dark_table = dark_table.sort_values('filename')

        dark_list = []
        for index, row in dark_table.iterrows():

            sami_pipeline.cosmic_rays = True
            sami_pipeline.dark_file = None
            sami_pipeline.flat_file = None
            sami_pipeline.time = True
            sami_pipeline.zero_file = row.zero_file

            dark_file = row.filename

            path, fname = os.path.split(dark_file)
            prefix = sami_pipeline.get_prefix()
            output_dark_file = os.path.join(red_path, prefix + fname)

            dark_list.append(prefix + fname)

            if os.path.exists(output_dark_file):
                log.warning('Skipping existing file: {}'.format(
                    output_dark_file))
                continue

            log.info('Processing DARK file: {}'.format(dark_file))

            # Use a context manager so the FITS file is closed after reduction.
            with pyfits.open(dark_file) as hdul:
                data, header, prefix = sami_pipeline.reduce(hdul)

            pyfits.writeto(output_dark_file, data, header)

        if len(dark_list) == 0:
            continue

        dark_list_name = os.path.join(red_path, "1Dark{}x{}".format(bx, by))

        with open(dark_list_name, 'w') as dark_list_buffer:
            for dark_file in dark_list:
                dark_list_buffer.write('{:s}\n'.format(dark_file))

        log.info('Combining DARK files.')
        master_dark = dark_list_name + '.fits'

        if os.path.exists(master_dark):
            log.warning(
                'Skipping existing MASTER DARK: {:s}'.format(master_dark))

        else:

            log.info("Writing master zero to: {}".format(master_dark))
            dark_combine_files = [os.path.join(red_path, f) for f in dark_list]

            dark_combine = combine.DarkCombine(input_list=dark_combine_files,
                                               output_file=master_dark)
            dark_combine.run()
            log.info('Done.')

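        # Attach the master dark to every non-DARK frame of this binning,
        # so the later FLAT/OBJECT steps can pick it up from the row.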
        mask1 = df['obstype'].values != 'DARK'
        mask2 = df['binning'].values == b
        df.loc[mask1 & mask2, 'dark_file'] = master_dark

    return df
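Taken together, the examples imply a calling order in which each step stamps the data-frame with the calibration file produced by the step before it. A minimal driver sketch, assuming the SAMI-style variants above and a data-frame that already carries the obstype, binning, filters, filename and calibration-file columns:

def run_pipeline(df, red_path):
    # Hypothetical driver; each call returns the data-frame with the
    # corresponding calibration column filled in for the later steps.
    df = process_zero_files(df, red_path)    # fills df['zero_file']
    df = process_dark_files(df, red_path)    # fills df['dark_file']
    df = process_flat_files(df, red_path)    # fills df['flat_file']
    df = process_object_files(df, red_path)  # writes reduced OBJECT frames
    return df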