Example #1
File: stage0.py Project: LiuDezi/pyDANDIA
def set_bad_pixel_mask_directory(setup,
                                 reduction_metadata,
                                 bpm_directory_path=None,
                                 verbose=False,
                                 log=None):
    '''
    Set the bad pixel mask directory in the data architecture layer of the
    reduction metadata, adding a BPM_PATH column if it does not exist yet.

    :param object setup: the setup object
    :param object reduction_metadata: the metadata object
    :param string bpm_directory_path: the directory of the bad pixel masks
    :param boolean verbose: switch to True for more information
    :param object log: the log object
    '''

    if 'BPM_PATH' in reduction_metadata.data_architecture[1].keys():
        reduction_metadata.update_a_cell_to_layer('data_architecture', 0,
                                                  'BPM_PATH',
                                                  bpm_directory_path)

    else:

        reduction_metadata.add_column_to_layer('data_architecture',
                                               'BPM_PATH',
                                               [bpm_directory_path],
                                               new_column_format=None,
                                               new_column_unit=None)

    logs.ifverbose(log, setup, 'Set bad pixel mask directory')
Example #2
def open_reference(setup, ref_image_directory, ref_image_name, kernel_size,
                   max_adu, ref_extension = 0, log = None, central_crop = None):
    '''
    Open the reference image and prepare it for constructing the u matrix.

    :param object setup: the setup object
    :param string ref_image_directory: the directory of the reference image
    :param string ref_image_name: the reference image filename
    :param integer kernel_size: edge length of the kernel in pixels
    :param float max_adu: the maximum ADU value allowed before a pixel is masked
    :return: extended reference image, bright-pixel mask, unmasked reference image
    '''

    logs.ifverbose(log, setup,
                   'Attempting to open ref image ' + os.path.join(ref_image_directory, ref_image_name))

    ref_image = fits.open(os.path.join(ref_image_directory, ref_image_name), mmap=True)

    #increase kernel size by 4 and define circular mask
    kernel_size_plus = int(kernel_size)+4
    mask_kernel = np.ones(kernel_size_plus * kernel_size_plus, dtype=float)
    mask_kernel = mask_kernel.reshape((kernel_size_plus, kernel_size_plus))
    xyc = int(kernel_size_plus / 2)
    radius_square = (xyc)**2
    for idx in range(kernel_size_plus):
        for jdx in range(kernel_size_plus):
            if (idx - xyc)**2 + (jdx - xyc)**2 >= radius_square:
                mask_kernel[idx, jdx] = 0.
    img_shape = np.shape(ref_image[ref_extension].data) 
    ref50pc = np.median(ref_image[ref_extension].data)
    ref_bright_mask = ref_image[ref_extension].data > max_adu + ref50pc
    ref_image[ref_extension].data = background_subtract(setup, ref_image[ref_extension].data, ref50pc)

    ref_image_unmasked = np.copy(ref_image[ref_extension].data)
    if central_crop is not None:
        tmp_image = np.zeros(np.shape(ref_image[ref_extension].data))
        tmp_image[central_crop:-central_crop,central_crop:-central_crop] = ref_image[ref_extension].data[central_crop:-central_crop,central_crop:-central_crop]
        ref_image[ref_extension].data = tmp_image

    mask_extended = np.zeros((np.shape(ref_image[ref_extension].data)[0] + 2 * kernel_size,
                             np.shape(ref_image[ref_extension].data)[1] + 2 * kernel_size))
    mask_extended[kernel_size:-kernel_size, kernel_size:-kernel_size][ref_bright_mask] = 1.
    ref_extended = np.zeros((np.shape(ref_image[ref_extension].data)[0] + 2 * kernel_size,
                             np.shape(ref_image[ref_extension].data)[1] + 2 * kernel_size))
    ref_extended[kernel_size:-kernel_size, kernel_size:-kernel_size] = \
        np.array(ref_image[ref_extension].data, float)
    
    #apply consistent mask
    ref_bright_mask = mask_extended > 0.
    mask_propagate = np.zeros(np.shape(ref_extended))
    mask_propagate[ref_bright_mask] = 1.
    #increase mask size to kernel size
    mask_propagate = convolve2d(mask_propagate, mask_kernel, mode='same')
    bright_mask = mask_propagate > 0.
    ref_extended[bright_mask] = 0.
    #dout = fits.PrimaryHDU(ref_extended)
    #dout.writeto('refext'+ref_image_name,overwrite=True)
    return ref_extended, bright_mask, ref_image_unmasked
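The double loop above builds the circular kernel mask one pixel at a time; for reference, the same mask can be built in vectorized NumPy. A minimal standalone sketch (not part of pyDANDIA):

import numpy as np

def circular_kernel_mask(kernel_size):
    # Same geometry as the loop in open_reference: 1.0 inside the circle
    # of radius kernel_size_plus/2 centred on the enlarged kernel, else 0.
    kernel_size_plus = int(kernel_size) + 4
    xyc = int(kernel_size_plus / 2)
    idx, jdx = np.ogrid[:kernel_size_plus, :kernel_size_plus]
    inside = (idx - xyc) ** 2 + (jdx - xyc) ** 2 < xyc ** 2
    return inside.astype(float)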
Example #3
    def find_images_need_to_be_process(self,
                                       setup,
                                       list_of_images,
                                       stage_number=None,
                                       rerun_all=None,
                                       log=None):
        '''
        This finds the images that need to be processed by the pipeline, i.e.
        not already done.

        :param object setup: the setup object
        :param list list_of_images: the list of image names
        :param int stage_number: the stage whose status is checked
        :param boolean rerun_all: if True, reset the status of all images
        :param object log: the log object

        :return: the new images that need to be processed
        :rtype: list
        '''

        column_name = 'STAGE_' + str(stage_number)
        if rerun_all:
            for name in list_of_images:
                image_row = np.where(
                    self.reduction_status[1]['IMAGES'] == name)[0][0]
                self.update_a_cell_to_layer('reduction_status', image_row,
                                            column_name, 0)

        layer = self.reduction_status

        new_images = []

        try:

            if len(layer[1]) == 0:

                new_images = list_of_images

            else:

                new_images = []

                for name in list_of_images:

                    image_row = np.where(layer[1]['IMAGES'] == name)[0][0]

                    if layer[1][image_row][column_name] != '1':
                        logs.ifverbose(
                            log, setup, name +
                            ' is a new image to process by stage number: ' +
                            str(stage_number))
                        new_images.append(name)

        except Exception:
            if log is not None:
                log.info('Error in scanning for new images to reduce')

        if log is not None:
            log.info('Total of ' + str(len(new_images)) +
                     ' images need reduction')

        return new_images
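For reference, this is the call pattern used by the stage drivers later on this page (see run_stage4 and run_stage5):

new_images = reduction_metadata.find_images_need_to_be_process(
    setup, all_images, stage_number=4, rerun_all=None, log=log)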
Example #4
def psf_star_selection(setup,reduction_metadata,log,ref_star_catalog,
                                                    diagnostics=False):
    """Function to select PSF stars from an existing list of stars detected
    in the reference image.
    
    Input parameters:
        setup       PipelineSetup object    Essential reduction parameters
        metadata    Table                   Reduction metadata
        log         logging object          Stage reduction log
        ref_star_catalog    Numpy array     Catalogue of detected stars
        
    Outputs:
        psf_stars_idx       Numpy array     [0,1] indicating whether each 
                                            star in the ref_star_catalog is 
                                            selected.  
    """
    
    psf_stars_idx = np.ones(len(ref_star_catalog))
    
    # Exclude brightest and faintest 10% of star list
    psf_stars_idx = id_mid_range_stars(setup,reduction_metadata,log,
                                       ref_star_catalog,psf_stars_idx)
    
    # Exclude stars with neighbours within proximity threshold that
    # also exceed the flux ratio threshold:
    psf_stars_idx = id_crowded_stars(setup,reduction_metadata,log,
                                      ref_star_catalog,psf_stars_idx)
    
    ref_star_catalog[:,15] = psf_stars_idx
    idx = np.where(ref_star_catalog[:,15] == 1.0)
    
    psf_idx = ref_star_catalog[idx[0],0]
    
    if setup.verbosity >= 1:

        logs.ifverbose(log,setup,'Selected PSF stars: ')

        for i in range(0,len(psf_idx),1):

            j = int(psf_idx[i])
            row = idx[0][i]

            logs.ifverbose(log,setup,str(j)+' at ('+
            str(ref_star_catalog[row,1])+', '+str(ref_star_catalog[row,2])+')')
    
    # [Optionally] plot selection
    if diagnostics == True:
        
        plot_ref_star_catalog_positions(setup,reduction_metadata,log,
                                    ref_star_catalog, psf_stars_idx)
    
    return ref_star_catalog
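A minimal, self-contained sketch of the flag round-trip used above, where column 15 of a hypothetical catalog holds the selection flag and column 0 the star ID:

import numpy as np

catalog = np.zeros((5, 16))                 # hypothetical 5-star catalog
catalog[:, 0] = np.arange(1, 6)             # star IDs in column 0
catalog[:, 15] = [1., 0., 1., 1., 0.]       # selection flags in column 15
rows = np.where(catalog[:, 15] == 1.0)[0]   # row indices of selected stars
print(catalog[rows, 0])                     # -> [1. 3. 4.]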
Example #5
def find_x_y_shifts_from_the_reference_image(setup,
                                             reference_image,
                                             target_image,
                                             edgefraction=0.5,
                                             log=None):
    """
    Found the pixel offset of the target image with the reference image

    :param object setup: the setup object
    :param object reference_image: the reference image data (i.e image.data)
    :param object target_image: the image data of interest (i.e image.data)
    :param float edgefraction: the percentage of images use for the shift computation (smaller = faster, [0,1])
    :param object log: the log object


    :return: [x_new_center, y_new_center, x_shift, y_shift], the new center and the correspondind shift of this image
    :rtype: array_like
    """

    reference_shape = reference_image.shape
    if reference_shape != target_image.shape:
        logs.ifverbose(
            log, setup,
            'The reference image and the target image dimensions do not match! Abort stage4'
        )
        sys.exit(1)

    x_center = int(reference_shape[0] / 2)
    y_center = int(reference_shape[1] / 2)

    half_x = int(edgefraction * float(reference_shape[0]) / 2)
    half_y = int(edgefraction * float(reference_shape[1]) / 2)

    reduce_template = reference_image[x_center - half_x:x_center + half_x,
                                      y_center - half_y:y_center + half_y]

    reduce_image = target_image[x_center - half_x:x_center + half_x,
                                y_center - half_y:y_center + half_y]
    x_shift, y_shift = correlation_shift(reduce_template, reduce_image)

    x_new_center = -x_shift + x_center
    y_new_center = -y_shift + y_center

    return x_new_center, y_new_center, x_shift, y_shift
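correlation_shift itself is not listed on this page; what follows is a minimal sketch of the same idea, estimating an integer-pixel offset from the peak of the FFT cross-correlation (an assumption about its behaviour, not the pyDANDIA implementation):

import numpy as np

def correlation_shift_sketch(template, image):
    # Peak of the cross-correlation gives the integer-pixel offset.
    corr = np.fft.ifft2(np.fft.fft2(template) *
                        np.conj(np.fft.fft2(image))).real
    peak = np.array(np.unravel_index(np.argmax(corr), corr.shape), dtype=float)
    # offsets past half the frame wrap around to negative shifts
    half = np.array(corr.shape) / 2.
    peak[peak > half] -= np.array(corr.shape)[peak > half]
    return peak[0], peak[1]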
Example #6
File: stage0.py Project: LiuDezi/pyDANDIA
def update_reduction_metadata_stamps(setup,
                                     reduction_metadata,
                                     open_image,
                                     stamp_size=None,
                                     arcseconds_stamp_size=(60, 60),
                                     pixel_scale=None,
                                     number_of_overlaping_pixels=25,
                                     log=None):
    '''
    Create the stamps definition in the reduction_metadata

    :param object reduction_metadata: the metadata object
    :param astropy.image open_image: the opened image
    :param list stamp_size: list of integers giving the X,Y stamp size, i.e. [150,52] gives 150 pix in X, 52 in Y
    :param tuple arcseconds_stamp_size: tuple of integers giving the X,Y stamp size in arcseconds
    :param float pixel_scale: pixel scale of the CCD, in arcsec/pix
    :param int number_of_overlaping_pixels: half the number of pixels you want overlapping in each direction

    '''

    if pixel_scale is None:
        pixel_scale = float(
            reduction_metadata.reduction_parameters[1]['PIX_SCALE'][0])

    (status, report,
     stamps) = construct_the_stamps(open_image,
                                    stamp_size,
                                    arcseconds_stamp_size,
                                    pixel_scale,
                                    number_of_overlaping_pixels,
                                    log=log)

    names = ['PIXEL_INDEX', 'Y_MIN', 'Y_MAX', 'X_MIN', 'X_MAX']
    formats = ['int', 'S200', 'S200', 'S200', 'S200']
    units = ['', 'pixel', 'pixel', 'pixel', 'pixel']

    reduction_metadata.create_stamps_layer(names, formats, units, stamps)

    logs.ifverbose(log, setup, 'Updated reduction metadata stamps')
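construct_the_stamps is not shown on this page, but the arcsecond-to-pixel conversion it presumably performs is simple arithmetic; a worked sketch under that assumption (0.389 arcsec/pix is a hypothetical pixel scale):

arcseconds_stamp_size = (60, 60)
pixel_scale = 0.389
stamp_size_pixels = (int(arcseconds_stamp_size[0] / pixel_scale),
                     int(arcseconds_stamp_size[1] / pixel_scale))
print(stamp_size_pixels)  # -> (154, 154)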
Example #7
def open_an_image(setup, image_directory, image_name, image_index=0, log=None):
    '''
    Simply open an image using astropy.io.fits

    :param object setup: the setup object
    :param string image_directory: the image directory
    :param string image_name: the image name
    :param int image_index: the extension index in the astropy fits object
    :param object log: the log object

    :return: the data of the opened image
    :rtype: numpy array
    '''
    image_directory_path = image_directory

    logs.ifverbose(
        log, setup, 'Attempting to open image ' +
        os.path.join(image_directory_path, image_name))

    try:

        image_data = fits.open(os.path.join(image_directory_path, image_name),
                               mmap=True)
        image_data = image_data[image_index]

        logs.ifverbose(log, setup, image_name + ' open : OK')

        return image_data.data

    except Exception:
        logs.ifverbose(log, setup, image_name + ' open : not OK!')

        return None
Example #8
def open_data_image(setup, data_image_directory, data_image_name, reference_mask, kernel_size,
                    max_adu, data_extension = 0, log = None, xshift = 0, yshift = 0, sigma_smooth = 0, central_crop = None):
    '''
    Open a data image and prepare it for constructing the u matrix.

    :param object setup: the setup object
    :param string data_image_directory: the directory of the data image
    :param string data_image_name: the data image filename
    :param array reference_mask: bright-pixel mask derived from the reference image
    :param integer kernel_size: edge length of the kernel in pixels
    :param float max_adu: the maximum ADU value allowed before a pixel is masked
    :return: extended data image, unmasked data image
    '''

    logs.ifverbose(log, setup, 'Attempting to open data image ' + os.path.join(data_image_directory, data_image_name))
    data_image = fits.open(os.path.join(data_image_directory, data_image_name), mmap=True)
    img50pc = np.median(data_image[data_extension].data)
    data_image[data_extension].data = background_subtract(setup, data_image[data_extension].data, img50pc)
    img_shape = np.shape(data_image[data_extension].data)
    shifted = np.zeros(img_shape)
    #smooth data image
    if sigma_smooth != 0:
        data_image[data_extension].data = gaussian_filter(data_image[data_extension].data, sigma=sigma_smooth)

    if xshift>img_shape[0] or yshift>img_shape[1]:
        return []
    data_image[data_extension].data = shift(data_image[data_extension].data, (-yshift,-xshift), cval=0.)
    data_image_unmasked = np.copy(data_image[data_extension].data)
    if central_crop is not None:
        tmp_image = np.zeros(np.shape(data_image[data_extension].data))
        tmp_image[central_crop:-central_crop,central_crop:-central_crop] = data_image[data_extension].data[central_crop:-central_crop,central_crop:-central_crop]
        data_image[data_extension].data = tmp_image
    # extend image size for convolution and kernel solution
    data_extended = np.zeros((np.shape(data_image[data_extension].data)[0] + 2 * kernel_size, np.shape(data_image[data_extension].data)[1] + 2 * kernel_size))
    data_extended[kernel_size:-kernel_size, kernel_size:-kernel_size] = \
        np.array(data_image[data_extension].data, float)
    
    #apply consistent mask    
    data_extended[reference_mask] = 0.
    #dout = fits.PrimaryHDU(data_extended)
    #dout.writeto('datext'+data_image_name,overwrite=True)
    return data_extended, data_image_unmasked
Example #9
    def find_all_images(self,
                        setup,
                        reduction_metadata,
                        images_directory_path=None,
                        log=None):
        '''
        Find all the images.

        :param object reduction_metadata: the metadata object
        :param string images_directory_path: the directory of the images
        :param boolean verbose: switch to True to have more information

        :return: the list of images (strings)
        :rtype: list
        '''

        try:

            path = reduction_metadata.data_architecture[1]['IMAGES_PATH'][0]

        except Exception:

            if images_directory_path:
                path = images_directory_path

                reduction_metadata.add_column_to_layer('data_architecture',
                                                       'IMAGES_PATH', [path])

        try:

            list_of_images = [
                i for i in os.listdir(path)
                if ('.fits' in i) and ('.gz' not in i) and ('.bz2' not in i)
            ]

            if list_of_images == []:

                logs.ifverbose(log, setup,
                               'No images to process. I take a rest :)')

                return None

            else:

                logs.ifverbose(log, setup, 'Found ' + str(len(list_of_images)) + \
                               ' images in this dataset')

                return list_of_images

        except Exception:

            logs.ifverbose(log, setup,
                           'Something went wrong with the image search!')

            return None
Example #10
File: stage0.py Project: LiuDezi/pyDANDIA
def create_or_load_the_reduction_metadata(
        setup,
        output_metadata_directory,
        metadata_name='pyDANDIA_metadata.fits',
        log=None):
    '''
    This creates (new reduction) or loads (ongoing reduction) the metadata file linked to this reduction.

    :param string output_metadata_directory: the directory where to place the metadata
    :param string metadata_name: the name of the metadata file
    :param boolean verbose: switch to True for more information

    :return: the metadata object
    :rtype: metadata object
    '''
    try:

        meta_data_exist = [
            i for i in os.listdir(output_metadata_directory)
            if (i == metadata_name)
        ]

        if meta_data_exist == []:

            reduction_metadata = metadata.MetaData()

            reduction_metadata.create_metadata_file(output_metadata_directory,
                                                    metadata_name)

            logs.ifverbose(log, setup,
                           'Successfully created the reduction metadata file')

        else:

            reduction_metadata = metadata.MetaData()
            reduction_metadata.load_all_metadata(output_metadata_directory,
                                                 metadata_name)
            logs.ifverbose(log, setup,
                           'Successfully found the reduction metadata')
    except Exception:

        logs.ifverbose(log, setup,
                       'No metadata created or loaded : check this!')

        sys.exit(1)

    return reduction_metadata
Example #11
def open_an_image(setup, image_directory, image_name,
                  image_index=0, log=None):
   
    image_directory_path = image_directory

    logs.ifverbose(log, setup,
                   'Attempting to open image ' + os.path.join(image_directory_path, image_name))

    try:

        image_data = fits.open(os.path.join(image_directory_path, image_name),
                               mmap=True)
        image_data = image_data[image_index]

        logs.ifverbose(log, setup, image_name + ' open : OK')

        return image_data

    except Exception:
        logs.ifverbose(log, setup, image_name + ' open : not OK!')

        return None
Example #12
File: stage5.py Project: LiuDezi/pyDANDIA
def subtract_small_format_image(new_images,
                                reference_image_name,
                                reference_image_directory,
                                reduction_metadata,
                                setup,
                                data_image_directory,
                                kernel_size,
                                max_adu,
                                ref_stats,
                                maxshift,
                                kernel_directory_path,
                                diffim_directory_path,
                                log=None):
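    '''
    Subtract each new image from a small-format reference: load (or compute
    and cache) the unweighted u matrix, solve for the kernel per image, and
    write the kernel and difference images to disk.
    '''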

    reference_image, bright_reference_mask, reference_image_unmasked = open_reference(
        setup,
        reference_image_directory,
        reference_image_name,
        kernel_size,
        max_adu,
        ref_extension=0,
        log=log,
        central_crop=maxshift)
    if len(new_images) > 0:
        #construct outlier map for kernel
        #new_images = [new_images[0]]
        if os.path.exists(
                os.path.join(kernel_directory_path,
                             'unweighted_u_matrix.npy')):
            umatrix, kernel_size_u, max_adu_restored = np.load(
                os.path.join(kernel_directory_path, 'unweighted_u_matrix.npy'))
            if (kernel_size_u != kernel_size) or (max_adu_restored != max_adu):
                #calculate and store unweighted umatrix
                umatrix = umatrix_constant(reference_image, kernel_size)
                np.save(
                    os.path.join(kernel_directory_path,
                                 'unweighted_u_matrix.npy'),
                    [umatrix, kernel_size, max_adu])
                hdutmp = fits.PrimaryHDU(umatrix)
                hdutmp.writeto(os.path.join(kernel_directory_path,
                                            'unweighted_u_matrix.fits'),
                               overwrite=True)
        else:
            #calculate and store unweighted umatrix
            umatrix = umatrix_constant(reference_image, kernel_size)
            np.save(
                os.path.join(kernel_directory_path, 'unweighted_u_matrix.npy'),
                [umatrix, kernel_size, max_adu])
            hdutmp = fits.PrimaryHDU(umatrix)
            hdutmp.writeto(os.path.join(kernel_directory_path,
                                        'unweighted_u_matrix.fits'),
                           overwrite=True)

    for new_image in new_images:
        row_index = np.where(
            reduction_metadata.images_stats[1]['IM_NAME'] == new_image)[0][0]
        ref_fwhm_x, ref_fwhm_y, ref_sigma_x, ref_sigma_y = ref_stats
        x_shift, y_shift = -reduction_metadata.images_stats[1][row_index][
            'SHIFT_X'], -reduction_metadata.images_stats[1][row_index][
                'SHIFT_Y']
        #if the reference is not as sharp as a data image -> smooth the data
        smoothing = 0.
        smoothing_y = 0.
        if reduction_metadata.images_stats[1][row_index]['FWHM_X'] < ref_fwhm_x:
            sigma_x = reduction_metadata.images_stats[1][row_index][
                'FWHM_X'] / (2. * (2. * np.log(2.))**0.5)
            smoothing = (ref_sigma_x**2 - sigma_x**2)**0.5
        if reduction_metadata.images_stats[1][row_index]['FWHM_Y'] < ref_fwhm_y:
            sigma_y = reduction_metadata.images_stats[1][row_index][
                'FWHM_Y'] / (2. * (2. * np.log(2.))**0.5)
            smoothing_y = (ref_sigma_y**2 - sigma_y**2)**0.5
        if smoothing_y > smoothing:
            smoothing = smoothing_y
        try:
            data_image, data_image_unmasked = open_data_image(
                setup,
                data_image_directory,
                new_image,
                bright_reference_mask,
                kernel_size,
                max_adu,
                xshift=x_shift,
                yshift=y_shift,
                sigma_smooth=smoothing,
                central_crop=maxshift)
            missing_data_mask = (data_image == 0.)
            b_vector = bvector_constant(reference_image, data_image,
                                        kernel_size)
            kernel_matrix, bkg_kernel, kernel_uncertainty = kernel_solution(
                umatrix, b_vector, kernel_size, circular=False)
            pscale = np.sum(kernel_matrix)
            np.save(
                os.path.join(kernel_directory_path,
                             'kernel_' + new_image + '.npy'),
                [kernel_matrix, bkg_kernel])
            kernel_header = fits.Header()
            kernel_header['SCALEFAC'] = str(pscale)
            kernel_header['KERBKG'] = bkg_kernel
            hdu_kernel = fits.PrimaryHDU(kernel_matrix, header=kernel_header)
            hdu_kernel.writeto(os.path.join(kernel_directory_path,
                                            'kernel_' + new_image),
                               overwrite=True)
            hdu_kernel_err = fits.PrimaryHDU(kernel_uncertainty)
            hdu_kernel_err.writeto(os.path.join(kernel_directory_path,
                                                'kernel_err_' + new_image),
                                   overwrite=True)
            if log is not None:
                logs.ifverbose(
                    log, setup, 'b_vector calculated for:' + new_image +
                    ' and scale factor ' + str(pscale))
            #CROP EDGE!
            difference_image = subtract_images(data_image_unmasked,
                                               reference_image_unmasked,
                                               kernel_matrix, kernel_size,
                                               bkg_kernel)

            new_header = fits.Header()
            new_header['SCALEFAC'] = pscale
            difference_image_hdu = fits.PrimaryHDU(difference_image,
                                                   header=new_header)
            difference_image_hdu.writeto(os.path.join(diffim_directory_path,
                                                      'diff_' + new_image),
                                         overwrite=True)
        except Exception as e:
            if log is not None:
                logs.ifverbose(
                    log, setup, 'kernel matrix computation or shift failed:' +
                    new_image + '. skipping!' + str(e))
            else:
                print(str(e))
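The smoothing width computed above follows from the fact that convolving two Gaussians adds their variances: to degrade a sharper image of width sigma_img to the reference width sigma_ref, smooth with sqrt(sigma_ref**2 - sigma_img**2). A standalone numerical check with hypothetical FWHM values:

import numpy as np

fwhm_to_sigma = 1. / (2. * (2. * np.log(2.)) ** 0.5)
ref_fwhm, img_fwhm = 4.0, 3.0                 # hypothetical FWHMs in pixels
sigma_ref = ref_fwhm * fwhm_to_sigma
sigma_img = img_fwhm * fwhm_to_sigma
smoothing = (sigma_ref ** 2 - sigma_img ** 2) ** 0.5
print(smoothing)                              # ~1.12 pix matching kernel width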
Example #13
def run_stage4(setup):
    """Main driver function to run stage 4: image alignement.
    This stage align the images to the reference frame!
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], the stage4 status, the report, the metadata file
    :rtype: array_like

    """

    stage4_version = 'stage4 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage4', version=stage4_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir,
                                         'pyDANDIA_metadata.fits')

    # find the images needed to treat
    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=4, rerun_all=None, log=log)

    if len(new_images) > 0:

        # find the reference image
        try:
            reference_image_name = reduction_metadata.data_architecture[1][
                'REF_IMAGE'].data[0]
            reference_image_directory = reduction_metadata.data_architecture[
                1]['REF_PATH'].data[0]
            reference_image = open_an_image(setup,
                                            reference_image_directory,
                                            reference_image_name,
                                            image_index=0,
                                            log=None)
            logs.ifverbose(
                log, setup,
                'I found the reference frame:' + reference_image_name)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I can not find any reference image! Abort stage4')

            status = 'KO'
            report = 'No reference frame found!'

            return status, report

        data = []
        images_directory = reduction_metadata.data_architecture[1][
            'IMAGES_PATH'].data[0]
        for new_image in new_images:
            target_image = open_an_image(setup,
                                         images_directory,
                                         new_image,
                                         image_index=0,
                                         log=None)

            try:
                x_new_center, y_new_center, x_shift, y_shift = find_x_y_shifts_from_the_reference_image(
                    setup,
                    reference_image,
                    target_image,
                    edgefraction=0.5,
                    log=None)

                data.append([new_image, x_shift, y_shift])
                logs.ifverbose(
                    log, setup,
                    'I found the image translation to the reference for frame:'
                    + new_image)

            except Exception:

                logs.ifverbose(
                    log, setup,
                    'I can not find the image translation to the reference for frame:'
                    + new_image + '. Abort stage4!')

                status = 'KO'
                report = 'No shift found for image:' + new_image + ' !'

                return status, report

        if ('SHIFT_X' in reduction_metadata.images_stats[1].keys()) and (
                'SHIFT_Y' in reduction_metadata.images_stats[1].keys()):

            for index in range(len(data)):
                target_image = data[index][0]
                x_shift = data[index][1]
                y_shift = data[index][2]
                row_index = np.where(reduction_metadata.images_stats[1]
                                     ['IM_NAME'].data == target_image)[0][0]
                reduction_metadata.update_a_cell_to_layer(
                    'images_stats', row_index, 'SHIFT_X', x_shift)
                reduction_metadata.update_a_cell_to_layer(
                    'images_stats', row_index, 'SHIFT_Y', y_shift)
                logs.ifverbose(log, setup,
                               'Updated metadata for image: ' + target_image)
        else:
            logs.ifverbose(log, setup,
                           'I have to construct SHIFT_X and SHIFT_Y columns')

            sorted_data = np.copy(data)

            for index in range(len(data)):
                target_image = data[index][0]

                row_index = np.where(reduction_metadata.images_stats[1]
                                     ['IM_NAME'].data == target_image)[0][0]

                sorted_data[row_index] = data[index]

            column_format = 'int'
            column_unit = 'pix'
            reduction_metadata.add_column_to_layer(
                'images_stats',
                'SHIFT_X',
                sorted_data[:, 1],
                new_column_format=column_format,
                new_column_unit=column_unit)

            reduction_metadata.add_column_to_layer(
                'images_stats',
                'SHIFT_Y',
                sorted_data[:, 2],
                new_column_format=column_format,
                new_column_unit=column_unit)

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=4, status=1, log=log)

    reduction_metadata.save_updated_metadata(
        reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],
        reduction_metadata.data_architecture[1]['METADATA_NAME'][0],
        log=log)

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report
Example #14
File: stage0.py Project: LiuDezi/pyDANDIA
def run_stage0(setup):
    """Main driver function to run stage 0: data preparation.    
    The tasks of this stage are to ensure that all images are prepared for 
    reduction, and to make sure the reduction metadata is up to date.
    Input: setup - is an instance of the ReductionSetup class. See 
           reduction_control.py
    Output: prepares the metadata file
    """

    stage0_version = 'stage0 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage0', version=stage0_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find and update the pipeline config
    pipeline_config = read_the_config_file(setup.pipeline_config_dir, log=log)

    reduction_metadata = create_or_load_the_reduction_metadata(
        setup, setup.red_dir, metadata_name='pyDANDIA_metadata.fits', log=log)

    update_reduction_metadata_with_config_file(reduction_metadata,
                                               pipeline_config,
                                               log=log)

    # find all images

    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    # find and update the inst pipeline config

    image_name = all_images[0]

    inst_config_file_name = find_the_inst_config_file_name(
        setup,
        reduction_metadata,
        image_name,
        setup.pipeline_config_dir,
        image_index=0,
        log=None)

    inst_config = read_the_inst_config_file(setup.pipeline_config_dir,
                                            inst_config_file_name,
                                            log=log)
    update_reduction_metadata_with_inst_config_file(reduction_metadata,
                                                    inst_config,
                                                    log=log)

    # find images need to be run, based on the metadata file, if any. If rerun_all = True, force a rereduction

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=0, rerun_all=None, log=log)
    # create new rows on reduction status for new images

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=0, status=0, log=log)

    # construct the stamps if needed
    if reduction_metadata.stamps[1]:
        pass
    else:

        open_image = open_an_image(
            setup,
            reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
            new_images[0],
            image_index=0,
            log=log)

        update_reduction_metadata_stamps(setup,
                                         reduction_metadata,
                                         open_image,
                                         stamp_size=None,
                                         arcseconds_stamp_size=(60, 60),
                                         pixel_scale=None,
                                         number_of_overlaping_pixels=25,
                                         log=log)

    if len(new_images) > 0:

        update_reduction_metadata_headers_summary_with_new_images(
            setup, reduction_metadata, new_images, log=log)

        set_bad_pixel_mask_directory(setup,
                                     reduction_metadata,
                                     bpm_directory_path=os.path.join(
                                         setup.red_dir, 'data'),
                                     log=log)

        logs.ifverbose(log, setup,
                       'Updating metadata with info on new images...')

        for new_image in new_images:
            open_image = open_an_image(
                setup,
                reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
                new_image,
                image_index=0,
                log=log)

            bad_pixel_mask = open_an_image(
                setup,
                reduction_metadata.data_architecture[1]['BPM_PATH'][0],
                new_image,
                image_index=2,
                log=log)

            # Occasionally, the LCO BANZAI pipeline fails to produce an image
            # catalogue for an image.  If this happens, there will only be 2
            # extensions to the FITS image HDU, the PrimaryHDU (main image data)
            # and the ImageHDU (BPM).
            if bad_pixel_mask is None:

                bad_pixel_mask = open_an_image(
                    setup,
                    reduction_metadata.data_architecture[1]['BPM_PATH'][0],
                    new_image,
                    image_index=1,
                    log=log)

            master_mask = construct_the_pixel_mask(open_image,
                                                   bad_pixel_mask, [1, 3],
                                                   saturation_level=65535,
                                                   low_level=0,
                                                   log=log)

            save_the_pixel_mask_in_image(reduction_metadata, new_image,
                                         master_mask)
            logs.ifverbose(log, setup, ' -> ' + new_image)

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=0, status=1, log=log)

    reduction_metadata.save_updated_metadata(
        reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],
        reduction_metadata.data_architecture[1]['METADATA_NAME'][0],
        log=log)

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report, reduction_metadata
Example #15
def run_psf_photometry(setup,
                       reduction_metadata,
                       log,
                       ref_star_catalog,
                       image_path,
                       psf_model,
                       sky_model,
                       centroiding=True):
    """Function to perform PSF fitting photometry on all stars for a single
    image.
    
    :param SetUp object setup: Essential reduction parameters
    :param MetaData reduction_metadata: pipeline metadata for this dataset
    :param logging log: Open reduction log object
    :param array ref_star_catalog: catalog of objects detected in the image
    :param str image_path: Path to image to be photometered
    :param PSFModel object psf_model: PSF to be fitted to each star
    :param BackgroundModel object sky_model: Model for the image sky background
    :param boolean centroiding: Switch to (dis)-allow re-fitting of each star's
                                x, y centroid.  Default=allowed (True)
    
    Returns:
    
    :param array ref_star_catalog: catalog of objects detected in the image
    """

    log.info('Starting photometry of ' + os.path.basename(image_path))

    data = fits.getdata(image_path)
    residuals = np.copy(data)

    psf_size = reduction_metadata.reduction_parameters[1]['PSF_SIZE'][0]
    half_psf = int(float(psf_size) / 2.0)

    logs.ifverbose(log,setup,'Applying '+psf_model.psf_type()+\
                    ' PSF of diameter='+str(psf_size))

    Y_data, X_data = np.indices((int(psf_size), int(psf_size)))

    for j in range(0, len(ref_star_catalog), 1):

        xstar = ref_star_catalog[j, 1]
        ystar = ref_star_catalog[j, 2]

        X_grid = X_data + (int(xstar) - half_psf)
        Y_grid = Y_data + (int(ystar) - half_psf)

        logs.ifverbose(log,setup,' -> Star '+str(j)+' at position ('+\
        str(xstar)+', '+str(ystar)+')')

        (fitted_model,
         good_fit) = psf.fit_star_existing_model(setup,
                                                 data,
                                                 xstar,
                                                 ystar,
                                                 psf_size,
                                                 psf_model,
                                                 sky_model,
                                                 centroiding=centroiding,
                                                 diagnostics=False)

        logs.ifverbose(
            log, setup, ' -> Star ' + str(j) + ' fitted model parameters = ' +
            repr(fitted_model.get_parameters()) + ' good fit? ' +
            repr(good_fit))

        if good_fit == True:

            sub_psf_model = psf.get_psf_object('Moffat2D')

            pars = fitted_model.get_parameters()
            pars[1] = (psf_size / 2.0) + (ystar - int(ystar))
            pars[2] = (psf_size / 2.0) + (xstar - int(xstar))

            sub_psf_model.update_psf_parameters(pars)

            (res_image,
             corners) = psf.subtract_psf_from_image(data, sub_psf_model, xstar,
                                                    ystar, psf_size, psf_size)

            residuals[corners[2]:corners[3],
                      corners[0]:corners[1]] = res_image[corners[2]:corners[3],
                                                         corners[0]:corners[1]]

            logs.ifverbose(
                log, setup,
                ' -> Star ' + str(j) + ' subtracted PSF from the residuals')

            (flux, flux_err) = fitted_model.calc_flux(Y_grid, X_grid)

            (mag, mag_err) = convert_flux_to_mag(flux, flux_err)

            ref_star_catalog[j, 5] = flux
            ref_star_catalog[j, 6] = flux_err
            ref_star_catalog[j, 7] = mag
            ref_star_catalog[j, 8] = mag_err

            logs.ifverbose(
                log, setup, ' -> Star ' + str(j) + ' flux=' + str(flux) +
                ' +/- ' + str(flux_err) + ' ADU, '
                'mag=' + str(mag) + ' +/- ' + str(mag_err) + ' mag')

        else:

            logs.ifverbose(
                log, setup, ' -> Star ' + str(j) +
                ' No photometry possible from poor PSF fit')

    res_image_path = os.path.join(
        setup.red_dir, 'ref',
        os.path.basename(image_path).replace('.fits', '_res.fits'))

    hdu = fits.PrimaryHDU(residuals)
    hdulist = fits.HDUList([hdu])
    hdulist.writeto(res_image_path, overwrite=True)

    logs.ifverbose(log, setup, 'Output residuals image ' + res_image_path)

    plot_ref_mag_errors(setup, ref_star_catalog)

    log.info('Completed photometry')

    return ref_star_catalog
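convert_flux_to_mag is not listed on this page; the following is a minimal sketch of the standard conversion it presumably applies, with a hypothetical zero point of 25.0 and first-order error propagation:

import numpy as np

def convert_flux_to_mag_sketch(flux, flux_err, zp=25.0):
    # mag = ZP - 2.5 log10(flux); d(mag) = (2.5 / ln 10) * (dflux / flux)
    mag = zp - 2.5 * np.log10(flux)
    mag_err = (2.5 / np.log(10.)) * (flux_err / flux)
    return mag, mag_err

print(convert_flux_to_mag_sketch(10000.0, 100.0))  # -> (15.0, ~0.0109)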
Example #16
def run_psf_photometry_on_difference_image(setup, reduction_metadata, log,
                                           ref_star_catalog, difference_image,
                                           psf_model, kernel, kernel_error,
                                           ref_exposure_time):
    """Function to perform PSF fitting photometry on all stars for a single difference image.
    
    :param SetUp object setup: Essential reduction parameters
    :param MetaData reduction_metadata: pipeline metadata for this dataset
    :param logging log: Open reduction log object
    :param array ref_star_catalog: catalog of objects detected in the image
    :param array_like difference_image: the array of data on which photometry is performed
    :param array_like psf_model: PSF to be fitted to each star
   
    
    Returns:
    
    :param array ref_star_catalog: catalog of objects detected in the image
    """

    psf_size = reduction_metadata.reduction_parameters[1]['PSF_SIZE'][0]
    half_psf = psf_size / 2

    size_stamp = int(psf_size) + 1
    if size_stamp % 2 == 0:
        size_stamp += 1

    Y_data, X_data = np.indices((size_stamp + 1, size_stamp + 1))

    list_image_id = []
    list_star_id = []

    list_ref_mag = []
    list_ref_mag_error = []
    list_ref_flux = []
    list_ref_flux_error = []

    list_delta_flux = []
    list_delta_flux_error = []
    list_mag = []
    list_mag_error = []

    list_phot_scale_factor = []
    list_phot_scale_factor_error = []
    list_background = []
    list_background_error = []

    list_align_x = []
    list_align_y = []

    phot_scale_factor = np.sum(kernel)
    error_phot_scale_factor = (np.sum(kernel_error))**0.5

    #kernel /=phot_scale_factor

    control_size = 50
    control_count = 0
    psf_parameters = psf_model.get_parameters()
    psf_parameters[0] = 1
    radius = psf_size / 2

    for j in range(0, len(ref_star_catalog), 1):

        list_image_id.append(0)
        list_star_id.append(ref_star_catalog[j, 0])

        ref_flux = ref_star_catalog[j, 5]
        error_ref_flux = ref_star_catalog[j, 6]

        list_ref_mag.append(ref_star_catalog[j, 5])
        list_ref_mag_error.append(ref_star_catalog[j, 6])
        list_ref_flux.append(ref_flux)
        list_ref_flux_error.append(error_ref_flux)

        xstar = ref_star_catalog[j, 1]
        ystar = ref_star_catalog[j, 2]

        X_grid = X_data + (int(np.round(xstar)) - half_psf)
        Y_grid = Y_data + (int(np.round(ystar)) - half_psf)

        logs.ifverbose(log,setup,' -> Star '+str(j)+' at position ('+\
        str(xstar)+', '+str(ystar)+')')

        psf_parameters[1] = xstar
        psf_parameters[2] = ystar

        psf_image = psf_model.psf_model(X_grid, Y_grid, psf_parameters)

        psf_convolve = convolution.convolve_image_with_a_psf(
            psf_image,
            kernel,
            fourrier_transform_psf=None,
            fourrier_transform_image=None,
            correlate=None,
            auto_correlation=None)
        try:

            max_x = int(
                np.min([
                    difference_image.shape[0],
                    np.max(X_data + (int(np.round(xstar)) - half_psf))
                ]))
            min_x = int(
                np.max([0,
                        np.min(X_data + (int(np.round(xstar)) - half_psf))]))
            max_y = int(
                np.min([
                    difference_image.shape[1],
                    np.max(Y_data + (int(np.round(ystar)) - half_psf))
                ]))
            min_y = int(
                np.max([0,
                        np.min(Y_data + (int(np.round(ystar)) - half_psf))]))

            data = difference_image[min_y:max_y, min_x:max_x]

            max_x = int(max_x - (int(np.round(xstar)) - half_psf))
            min_x = int(min_x - (int(np.round(xstar)) - half_psf))
            max_y = int(max_y - (int(np.round(ystar)) - half_psf))
            min_y = int(min_y - (int(np.round(ystar)) - half_psf))

            psf_fit = psf_convolve[min_y:max_y, min_x:max_x]
            psf_fit /= np.max(psf_fit)

            residuals = np.copy(data)

        except:
            import pdb
            pdb.set_trace()

        good_fit = True
        if good_fit == True:

            logs.ifverbose(
                log, setup,
                ' -> Star ' + str(j) + ' subtracted from the residuals')

            center = (psf_fit.shape)
            xx, yy = np.indices(psf_fit.shape)
            mask = ((xx - center[0] / 2)**2 +
                    (yy - center[1] / 2)**2) < radius**2
            mask2 = ((xx - center[0] / 2)**2 +
                     (yy - center[1] / 2)**2) < 2 * radius**2

            weight1 = 0.5 + np.abs(data + 0.25)**0.5
            weight2 = -0.5 + np.abs(data + 0.25)**0.5
            weight = (weight1**2 + weight2**2)**0.5

            rejected_points = 0

            intensities, cov = np.polyfit(psf_fit[mask2],
                                          data[mask2],
                                          1,
                                          w=1 / weight[mask2],
                                          cov=True)

            (flux, flux_err) = (intensities[0], (cov[0][0])**0.5)

            (back, back_err) = (intensities[1], cov[1][1]**0.5)

            flux_tot = ref_flux - flux / phot_scale_factor
            flux_err_tot = (error_ref_flux**2 +
                            (flux_err / phot_scale_factor)**2)**0.5

            list_delta_flux.append(flux)
            list_delta_flux_error.append(flux_err)

            (mag,
             mag_err) = convert_flux_to_mag(flux_tot / ref_exposure_time,
                                            flux_err_tot / ref_exposure_time)

            list_mag.append(mag)
            list_mag_error.append(mag_err)
            list_phot_scale_factor.append(phot_scale_factor)
            list_phot_scale_factor_error.append(error_phot_scale_factor)
            list_background.append(back)
            list_background_error.append(back_err)

            list_align_x.append(0)
            list_align_y.append(0)

        else:

            logs.ifverbose(
                log, setup,
                ' -> Star ' + str(j) + ' No photometry possible from poor fit')

            list_delta_flux.append(-10**30)
            list_delta_flux_error.append(-10**30)
            list_mag.append(-10**30)
            list_mag_error.append(-10**30)
            list_phot_scale_factor.append(np.sum(kernel))
            list_phot_scale_factor_error.append(0)
            list_background.append(0)
            list_background_error.append(0)

            list_align_x.append(0)
            list_align_y.append(0)
    #import pdb; pdb.set_trace()

    difference_image_photometry = [
        list_image_id, list_star_id, list_ref_mag, list_ref_mag_error,
        list_ref_flux, list_ref_flux_error, list_delta_flux,
        list_delta_flux_error, list_mag, list_mag_error,
        list_phot_scale_factor, list_phot_scale_factor_error, list_background,
        list_background_error, list_align_x, list_align_y
    ]

    log.info('Completed photometry on difference image')

    #return  difference_image_photometry, control_zone
    return np.array(difference_image_photometry).T, 1
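The photometric core of the loop above is a weighted straight-line fit of the difference-image pixels against the convolved, peak-normalised PSF: the slope is the differential flux and the intercept the local background. A self-contained sketch of that step on synthetic data (with a simplified version of the weighting above):

import numpy as np

rng = np.random.default_rng(0)
psf_fit = np.exp(-np.linspace(-2., 2., 200) ** 2)   # stand-in normalised PSF
true_flux, true_back = 500.0, 12.0
data = true_flux * psf_fit + true_back + rng.normal(0., 1., psf_fit.size)
weight = 0.5 + np.abs(data + 0.25) ** 0.5           # simplified Poisson-like weights

intensities, cov = np.polyfit(psf_fit, data, 1, w=1. / weight, cov=True)
flux, flux_err = intensities[0], cov[0][0] ** 0.5
back, back_err = intensities[1], cov[1][1] ** 0.5
print(flux, back)                                   # recovers ~500 and ~12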
Example #17
def run_stage6(setup):
    """Main driver function to run stage 6: image substraction and photometry.
    This stage align the images to the reference frame!
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], the stage4 status, the report, the metadata file
    :rtype: array_like

    """

    stage6_version = 'stage6 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage6', version=stage6_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir, 'pyDANDIA_metadata.fits')

    # find the images needed to treat
    all_images = reduction_metadata.find_all_images(setup, reduction_metadata,
                                                    os.path.join(setup.red_dir, 'data'), log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(setup, all_images,
                                                                   stage_number=6, rerun_all=None, log=log)

    # find the starlist
    starlist = reduction_metadata.star_catalog[1]

    max_x = np.max(starlist['x_pixel'].data)
    max_y = np.max(starlist['y_pixel'].data)
    mask = (starlist['psf_star'].data == 1) & \
           (starlist['x_pixel'].data < max_x - 25) & (starlist['x_pixel'].data > 25) & \
           (starlist['y_pixel'].data < max_y - 25) & (starlist['y_pixel'].data > 25)

    control_stars = starlist[mask][:10]
    star_coordinates = np.c_[control_stars['star_index'].data,
                             control_stars['x_pixel'].data,
                             control_stars['y_pixel'].data]

    for index, key in enumerate(starlist.columns.keys()):

        if index != 0:

            ref_star_catalog = np.c_[ref_star_catalog, starlist[key].data]

        else:

            ref_star_catalog = starlist[key].data


    psf_model = fits.open(reduction_metadata.data_architecture[1]['REF_PATH'].data[0]+'/psf_model.fits')

    psf_type = psf_model[0].header['PSFTYPE']
    psf_parameters = [0, psf_model[0].header['Y_CENTER'],
                      psf_model[0].header['X_CENTER'],
                      psf_model[0].header['GAMMA'],
                      psf_model[0].header['ALPHA']]       
    
 
    sky_model = sky_background.model_sky_background(setup,
                                        reduction_metadata,log,ref_star_catalog)


    psf_model = psf.get_psf_object( psf_type )
    psf_model.update_psf_parameters( psf_parameters)

    ind = ((starlist['x_pixel']-150)**2<1) & ((starlist['y_pixel']-150)**2<1)
    print (np.argmin(((starlist['x_pixel']-150)**2) + ((starlist['y_pixel']-150)**2)))
    if len(new_images) > 0:

        # find the reference image
        try:
            reference_image_name = reduction_metadata.data_architecture[1]['REF_IMAGE'].data[0]
            reference_image_directory = reduction_metadata.data_architecture[1]['REF_PATH'].data[0]
            reference_image,date = open_an_image(setup, reference_image_directory, reference_image_name, image_index=0,
                                            log=None)
                                            
            ref_image_name = reduction_metadata.data_architecture[1]['REF_IMAGE'].data[0]
            index_reference = np.where(ref_image_name == reduction_metadata.headers_summary[1]['IMAGES'].data)[0][0]
            ref_exposure_time = float(reduction_metadata.headers_summary[1]['EXPKEY'].data[index_reference])
   
            logs.ifverbose(log, setup,
                           'I found the reference frame:' + reference_image_name)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I can not find any reference image! Abort stage6')

            status = 'KO'
            report = 'No reference frame found!'

            return status, report

        # find the kernels directory
        try:

            kernels_directory = reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'].data[0]+'kernel/'

            logs.ifverbose(log, setup,
                           'I found the kernels directory:' + kernels_directory)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I can not find the kernels directory! Abort stage6')

            status = 'KO'
            report = 'No kernels directory found!'

            return status, report

        data = []
        diffim_directory = reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'].data[0]+'diffim/'
        images_directory = reduction_metadata.data_architecture[1]['IMAGES_PATH'].data[0]
        phot = np.zeros((len(new_images),len(ref_star_catalog),16))
        time = []
        for idx,new_image in enumerate(new_images):

            log.info('Starting difference photometry of '+new_image)
            target_image,date = open_an_image(setup, images_directory, new_image, image_index=0, log=None)
            kernel_image,kernel_error,kernel_bkg = find_the_associated_kernel(setup, kernels_directory, new_image)
         
            difference_image = image_substraction(setup, reduction_metadata,reference_image, kernel_image, new_image)-kernel_bkg
         

            time.append(date)

            save_control_stars_of_the_difference_image(setup, new_image, difference_image, star_coordinates)

            photometric_table, control_zone = photometry_on_the_difference_image(setup, reduction_metadata, log,ref_star_catalog,difference_image,  psf_model, sky_model, kernel_image,kernel_error, ref_exposure_time)
         
            phot[idx,:,:] = photometric_table

            #save_control_zone_of_residuals(setup, new_image, control_zone)     

            #ingest_photometric_table_in_db(setup, photometric_table) 
    import matplotlib.pyplot as plt
    ind = ((starlist['x_pixel']-150)**2<1) & ((starlist['y_pixel']-150)**2<1)
    plt.errorbar(time,phot[:,ind,8],fmt='.k')

    plt.show()

    status = 'OK'
    report = 'Completed successfully'

    return status, report
Example #18
File: stage5.py Project: LiuDezi/pyDANDIA
def run_stage5(setup):
    """Main driver function to run stage 5: kernel_solution
    This stage finds the kernel solution and (optionally) subtracts the model
    image
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], stage5 status, report, 
     metadata file
    :rtype: array_like
    """

    stage5_version = 'stage5 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage5', version=stage5_version)
    log.info('Setup:\n' + setup.summary() + '\n')
    try:
        from umatrix_routine import umatrix_construction, umatrix_bvector_construction, bvector_construction

    except ImportError:
        log.info(
            'Uncompiled cython code, please run setup.py: e.g.\n python setup.py build_ext --inplace'
        )
        status = 'KO'
        report = 'Uncompiled cython code, please run setup.py: e.g.\n python setup.py build_ext --inplace'
        return status, report

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir,
                                         'pyDANDIA_metadata.fits')

    #determine kernel size based on maximum FWHM
    fwhm_max = 0.
    shift_max = 0
    for stats_entry in reduction_metadata.images_stats[1]:
        if float(stats_entry['FWHM_X']) > fwhm_max:
            fwhm_max = stats_entry['FWHM_X']
        if float(stats_entry['FWHM_Y']) > fwhm_max:
            fwhm_max = stats_entry['FWHM_Y']
        if abs(float(stats_entry['SHIFT_X'])) > shift_max:
            shift_max = abs(float(stats_entry['SHIFT_X']))
        if abs(float(stats_entry['SHIFT_Y'])) > shift_max:
            shift_max = abs(float(stats_entry['SHIFT_Y']))
    maxshift = int(shift_max) + 2
    # assume a small-format image (smaller than or equal to 500x500 px)
    large_format_image = False

    sigma_max = fwhm_max / (2. * (2. * np.log(2.))**0.5)
    # The factor 4 corresponds to the kernel radius of 2*FWHM used in the old pipeline
    kernel_size = int(
        4. * float(reduction_metadata.reduction_parameters[1]['KER_RAD'][0]) *
        fwhm_max)
    # the kernel needs an odd edge length so that it has a central pixel
    if kernel_size and kernel_size % 2 == 0:
        kernel_size = kernel_size + 1
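    # Worked example (illustrative numbers, not from the pipeline):
    # fwhm_max = 5.2 px gives sigma_max = 5.2 / 2.3548 ~ 2.21 px, and with
    # KER_RAD = 2 the kernel edge is int(4 * 2 * 5.2) = 41 px, already odd.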
    # find the images that need to be processed
    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=5, rerun_all=None, log=log)

    kernel_directory_path = os.path.join(setup.red_dir, 'kernel')
    diffim_directory_path = os.path.join(setup.red_dir, 'diffim')
    if not os.path.exists(kernel_directory_path):
        os.mkdir(kernel_directory_path)
    if not os.path.exists(diffim_directory_path):
        os.mkdir(diffim_directory_path)
    reduction_metadata.update_column_to_layer('data_architecture',
                                              'KERNEL_PATH',
                                              kernel_directory_path)
    # difference images are written for verbosity level > 0
    reduction_metadata.update_column_to_layer('data_architecture',
                                              'DIFFIM_PATH',
                                              diffim_directory_path)
    data_image_directory = reduction_metadata.data_architecture[1][
        'IMAGES_PATH'][0]
    ref_directory_path = '.'
    #For a quick image subtraction, pre-calculate a sufficiently large u_matrix
    #based on the largest FWHM and store it to disk -> needs config switch

    try:
        reference_image_name = str(
            reduction_metadata.data_architecture[1]['REF_IMAGE'][0])
        reference_image_directory = str(
            reduction_metadata.data_architecture[1]['REF_PATH'][0])
        max_adu = 0.3 * float(
            reduction_metadata.reduction_parameters[1]['MAXVAL'][0])
        ref_row_index = np.where(
            reduction_metadata.images_stats[1]['IM_NAME'] == str(
                reduction_metadata.data_architecture[1]['REF_IMAGE'][0]))[0][0]
        ref_fwhm_x = reduction_metadata.images_stats[1][ref_row_index][
            'FWHM_X']
        ref_fwhm_y = reduction_metadata.images_stats[1][ref_row_index][
            'FWHM_Y']
        ref_sigma_x = ref_fwhm_x / (2. * (2. * np.log(2.))**0.5)
        ref_sigma_y = ref_fwhm_y / (2. * (2. * np.log(2.))**0.5)
        ref_stats = [ref_fwhm_x, ref_fwhm_y, ref_sigma_x, ref_sigma_y]
        logs.ifverbose(log, setup,
                       'Using reference image: ' + reference_image_name)
    except Exception as e:
        logs.ifverbose(log, setup,
                       'Reference image not found! Abort stage5: ' + str(e))
        status = 'KO'
        report = 'No reference image found!'
        return status, report

    # abort unless both shift columns from stage4 are present
    if ('SHIFT_X' not in reduction_metadata.images_stats[1].keys()) or (
            'SHIFT_Y' not in reduction_metadata.images_stats[1].keys()):
        logs.ifverbose(log, setup, 'No x/y shifts! Run stage4 first! Abort stage5')
        status = 'KO'
        report = 'No alignment data found!'
        return status, report

    if not large_format_image:
        subtract_small_format_image(new_images,
                                    reference_image_name,
                                    reference_image_directory,
                                    reduction_metadata,
                                    setup,
                                    data_image_directory,
                                    kernel_size,
                                    max_adu,
                                    ref_stats,
                                    maxshift,
                                    kernel_directory_path,
                                    diffim_directory_path,
                                    log=log)
    # TODO: append a quality metric for the kernel, perhaps its scale factor
    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=5, status=1, log=log)
    logs.close_log(log)
    status = 'OK'
    report = 'Completed successfully'

    return status, report
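A minimal sketch of driving this stage, assuming only what the docstring states: a ReductionSetup-like object (see reduction_control.py) exposing the members run_stage5 actually touches. The MinimalSetup class and its attribute values here are hypothetical:

# Hypothetical stand-in for reduction_control.ReductionSetup; only the
# members used by run_stage5 above are provided.
class MinimalSetup:
    red_dir = '/path/to/reduction'   # directory holding pyDANDIA_metadata.fits
    verbosity = 1                    # assumed to be read by logs.ifverbose
    def summary(self):
        return 'red_dir: ' + self.red_dir

status, report = run_stage5(MinimalSetup())
print(status, report)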
Example #19
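This excerpt carries no file attribution, so the names it relies on are assumed to come from the surrounding module's imports (inferred from usage, not shown in the source):

# Assumed module-level imports for this excerpt:
import os
import numpy as np
from astropy.io import fits
from scipy.signal import convolve2d
# `logs` is the pyDANDIA logging helper module used throughout the project.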
def open_images(setup, ref_image_directory, data_image_directory, ref_image_name,
                data_image_name, kernel_size, max_adu, ref_extension = 0,
                data_image_extension = 0, log = None):
    #to be updated with open_an_image ....
    '''
    The reference and data images need to be opened jointly; bright pixels
    are masked on both images based on the kernel size and max_adu.

    :param object setup: the setup object
    :param string ref_image_directory: directory of the reference image
    :param string data_image_directory: directory of the data image
    :param string ref_image_name: reference image filename
    :param string data_image_name: data image filename
    :param integer kernel_size: edge length of the kernel in px
    :param float max_adu: threshold above which pixels are masked as bright
    :return: extended reference image, extended data image, bright pixel mask
    '''

    logs.ifverbose(log, setup,
                   'Attempting to open data image ' + os.path.join(data_image_directory, data_image_name))

    data_image = fits.open(os.path.join(data_image_directory, data_image_name), mmap=True)

    logs.ifverbose(log, setup,
                   'Attempting to open ref image ' + os.path.join(ref_image_directory, ref_image_name))

    ref_image = fits.open(os.path.join(ref_image_directory, ref_image_name), mmap=True)

    #increase kernel size by 2 and define circular mask
    kernel_size_plus = kernel_size + 2
    mask_kernel = np.ones(kernel_size_plus * kernel_size_plus, dtype=float)
    mask_kernel = mask_kernel.reshape((kernel_size_plus, kernel_size_plus))
    xyc = int(kernel_size_plus / 2)
    radius_square = (xyc)**2
    for idx in range(kernel_size_plus):
        for jdx in range(kernel_size_plus):
            if (idx - xyc)**2 + (jdx - xyc)**2 >= radius_square:
                mask_kernel[idx, jdx] = 0.
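    # Note: an equivalent vectorized construction of this circular top-hat
    # mask (a sketch, not from the source) would be:
    #   yy, xx = np.ogrid[:kernel_size_plus, :kernel_size_plus]
    #   mask_kernel = ((yy - xyc)**2 + (xx - xyc)**2 < radius_square).astype(float)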

    #subtract a background estimate using the 10th percentile
    ref10pc = np.percentile(ref_image[ref_extension].data, 10.)
    ref_image[ref_extension].data = ref_image[ref_extension].data - ref10pc

    logs.ifverbose(log, setup,
                   'Background reference= ' + str(ref10pc))

    # extend image size for convolution and kernel solution
    data_shape = np.shape(data_image[data_image_extension].data)
    ref_shape = np.shape(ref_image[ref_extension].data)
    data_extended = np.zeros((data_shape[0] + 2 * kernel_size,
                              data_shape[1] + 2 * kernel_size))
    ref_extended = np.zeros((ref_shape[0] + 2 * kernel_size,
                             ref_shape[1] + 2 * kernel_size))
    data_extended[kernel_size:-kernel_size, kernel_size:-kernel_size] = \
        np.array(data_image[data_image_extension].data, float)
    ref_extended[kernel_size:-kernel_size, kernel_size:-kernel_size] = \
        np.array(ref_image[ref_extension].data, float)
    
    #apply consistent mask
    ref_bright_mask = ref_extended > max_adu + ref10pc
    data_bright_mask = data_extended > max_adu
    mask_propagate = np.zeros(np.shape(data_extended))
    mask_propagate[ref_bright_mask] = 1.
    mask_propagate[data_bright_mask] = 1.
    #increase mask size to kernel size
    mask_propagate = convolve2d(mask_propagate, mask_kernel, mode='same')
    bright_mask = mask_propagate > 0.
    ref_extended[bright_mask] = 0.
    data_extended[bright_mask] = 0.

    return ref_extended, data_extended, bright_mask
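Convolving the binary mask with the kernel-sized top-hat grows every flagged pixel into a flagged disc, so no kernel placement can overlap a bright pixel. A minimal standalone demonstration of this dilation step (array sizes illustrative):

# Sketch of the mask-growing trick used above.
import numpy as np
from scipy.signal import convolve2d

mask = np.zeros((9, 9))
mask[4, 4] = 1.                # a single flagged pixel
tophat = np.ones((3, 3))       # stand-in for mask_kernel
grown = convolve2d(mask, tophat, mode='same') > 0.
print(int(grown.sum()))        # 9: the pixel plus its 8 neighbours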