def combine_colour_datasets():
    """Function to plot colour magnitude and colour-colour plots"""
    
    datasets = {'ip': None, 'rp': None, 'gp': None,
                'ip_images': None, 'rp_images': None, 'gp_images': None}
    
    params = get_args()
    
    log = logs.start_stage_log( params['red_dir'], 'combine_colour_datasets', version=VERSION )
    
    for f in ['ip', 'rp', 'gp']:
        
        if params[f] is not None:
            
            (datasets[f],datasets[f+'_images']) = extract_star_catalog(params,f,log)
        
    # Each missing passband leaves two None entries (catalog + image list),
    # so data for fewer than two passbands means at least four None values
    if list(datasets.values()).count(None) >= 4:
        
        log.info('ERROR: Data available for only 1 passband, cannot produce figures')
        logs.close_log(log)

        exit()
        
    (combined_catalog,col_names,formats,units,f1,f2,f3) = combine_star_catalogs(datasets,log)
    
    image_trios = identify_image_trios(params,datasets,log)
    
    output_combined_catalog(combined_catalog,col_names,formats,units,f1,f2,f3,
                            image_trios,params,log)
    
    logs.close_log(log)
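
# A minimal sketch of the per-band catalog combination step above, assuming
# each dataset is an astropy Table keyed by a shared 'star_index' column with
# a 'cal_ref_mag' magnitude column.  combine_two_band_catalogs is a
# hypothetical helper for illustration, not the pipeline's combine_star_catalogs.
from astropy.table import join

def combine_two_band_catalogs(cat1, cat2, band1='ip', band2='rp'):
    """Join two single-band star catalogs and add a colour column."""
    combined = join(cat1, cat2, keys='star_index', table_names=[band1, band2])
    # Colour index, e.g. (i-r), from the calibrated reference magnitudes
    combined[band1 + '-' + band2] = (combined['cal_ref_mag_' + band1]
                                     - combined['cal_ref_mag_' + band2])
    return combined
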
def test_calculate_separations_on_sky():

    log = logs.start_stage_log( '.', 'test_compare_crossmatch' )

    nstars = 10
    id_start = 405644951584364160

    data = [ Column(name='x1', data=np.arange(1.0,float(nstars+1),1.0)),
             Column(name='y1', data=np.arange(1.0,float(nstars+1),1.0)),
             Column(name='ra1', data=np.linspace(250.0, 265.0, nstars)),
             Column(name='dec1', data=np.linspace(-25.0, -28.0, nstars)),
             Column(name='gaia_id1', data=np.arange(id_start, id_start+nstars, 1)),

             Column(name='x2', data=np.arange(2.0,float(nstars+2),1.0)),
             Column(name='y2', data=np.arange(2.0,float(nstars+2),1.0)),
             Column(name='ra2', data=np.linspace(250.001, 265.001, nstars)),
             Column(name='dec2', data=np.linspace(-25.001, -28.001, nstars)),
             Column(name='gaia_id2', data=np.arange(id_start, id_start+nstars, 1)),

             Column(name='separation', data=np.zeros(nstars)) ]
    matched_data = Table(data)

    matched_data = compare_xmatches.calculate_separations_on_sky(matched_data, log)

    assert( (matched_data['separation'] != 0.0).all() )

    logs.close_log(log)
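
# A minimal sketch of what calculate_separations_on_sky is expected to do,
# inferred from the column names used in the test above; the real
# compare_xmatches implementation may differ.
from astropy import units as u
from astropy.coordinates import SkyCoord

def calculate_separations_on_sky_sketch(matched_data, log):
    c1 = SkyCoord(ra=matched_data['ra1'], dec=matched_data['dec1'], unit=(u.deg, u.deg))
    c2 = SkyCoord(ra=matched_data['ra2'], dec=matched_data['dec2'], unit=(u.deg, u.deg))
    # Angular separation of each matched pair, stored in arcsec
    matched_data['separation'] = c1.separation(c2).to(u.arcsec).value
    log.info('Calculated on-sky separations for all matched stars')
    return matched_data
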
def compare_catalog_xmatch_between_reductions():

    params = get_args()

    log = logs.start_stage_log(params['log_dir'], 'compare_crossmatch')

    meta1 = metadata.MetaData()
    meta1.load_all_metadata(params['red_dir1'], 'pyDANDIA_metadata.fits')
    log.info('Loaded metadata from ' + params['red_dir1'])
    meta2 = metadata.MetaData()
    meta2.load_all_metadata(params['red_dir2'], 'pyDANDIA_metadata.fits')
    log.info('Loaded metadata from ' + params['red_dir2'])

    # Crossmatch the catalogs by x, y pixel positions
    (meta1_matches,
     meta2_matches) = crossmatch_pixel_positions_per_star(meta1,
                                                          meta2,
                                                          log,
                                                          threshold=1.0)

    matched_data = build_matched_arrays(meta1, meta2, meta1_matches,
                                        meta2_matches, log)

    matched_data = calculate_separations_on_sky(matched_data, log)

    # For each matching star, compare RA, Dec and Gaia ID if available
    compare_coordinates(params, matched_data, log)
    compare_gaia_ids(params, matched_data, log)

    logs.close_log(log)
def calibrate_photometry():
    """Function to calculate the photometric transform between the instrumental
    magnitudes produced by the pyDANDIA pipeline and catalog data."""

    params = get_args()

    setup = pipeline_setup.pipeline_setup(params)

    log = logs.start_stage_log(setup.red_dir, 'phot_calib', version=VERSION)

    (reduction_metadata, params,
     star_catalog) = fetch_metadata(setup, params, log)

    star_catalog = select_good_detected_stars(star_catalog, params, log)

    vphas_cat = fetch_catalog_sources_within_image(params, log)

    vphas_cat = select_calibration_stars(vphas_cat, params, log)

    catalog_file = os.path.join(params['red_dir'], 'vphas_catalog.fits')

    match_index = match_stars_by_position(star_catalog, vphas_cat, log)

    #    catalog_utils.output_vphas_catalog_file(catalog_file,vphas_cat,match_index=match_index)

    fit = calc_phot_calib(params, star_catalog, vphas_cat, match_index, log)

    star_catalog = apply_phot_calib(star_catalog, fit, log)

    output_to_metadata(star_catalog, reduction_metadata, vphas_cat,
                       match_index, setup, params, log)
    logs.close_log(log)
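
# A minimal sketch of positional matching in the spirit of
# match_stars_by_position above, assuming both catalogs provide 'ra'/'dec'
# columns in degrees (hypothetical helper, for illustration only).
import numpy as np
from astropy.coordinates import SkyCoord

def match_by_position_sketch(star_catalog, vphas_cat, tolerance_arcsec=1.0):
    detected = SkyCoord(star_catalog['ra'], star_catalog['dec'], unit='deg')
    catalog = SkyCoord(vphas_cat['ra'], vphas_cat['dec'], unit='deg')
    # Nearest catalog neighbour for every detected star
    idx, sep2d, _ = detected.match_to_catalog_sky(catalog)
    valid = sep2d.arcsec <= tolerance_arcsec
    # Pairs of (detected index, catalog index) within the match tolerance
    return np.c_[np.where(valid)[0], idx[valid]]
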
def compare_star_catalogs():
    """Function to compare the photometry from two different star catalogs"""
    
    params = get_args()
    
    log = logs.start_stage_log( '.', 'compare_star_catalogs' )
    
    star_cat1 = extract_star_catalog(params,'cat1',log)
    star_cat2 = extract_star_catalog(params,'cat2',log)

    match_index = cross_match_stars(star_cat1,star_cat2,log)
    
    plot_matched_ref_photometry(star_cat1,star_cat2,match_index,log)
    
    logs.close_log(log)
def calc_calibrated_lightcurve():
    """Function to apply a measured photometric offset to calibrate the 
    instrumental magnitudes provided in a lightcurve"""
    
    params = get_args()
    
    setup = pipeline_setup.pipeline_setup(params)
    
    log = logs.start_stage_log( setup.red_dir, 'lc_calib' )

    log.info('Calibrating lightcurve.  Initial parameters:')    
    for key, value in params.items():
        log.info(key+': '+str(value))
        
    (reduction_metadata, params, star_catalog) = fetch_metadata(setup,params,log)
    
    lc_data = read_rbn_lc(params['lc_file'],log)
    
    params = calc_mag_offset(params,lc_data,reduction_metadata,star_catalog,log)
    
    apply_mag_offset_to_lc(params,lc_data,log)
    
    logs.close_log(log)
def test_crossmatch_pixel_positions():

    log = logs.start_stage_log( '.', 'test_compare_crossmatch' )

    meta1 = metadata.MetaData()
    nstars1 = 20000    # Must be greater than nstars2
    data = [Column(name='x', data=np.arange(0,nstars1,1)),
            Column(name='y', data=np.arange(0,nstars1,1))]
    setattr(meta1,'star_catalog',([],Table(data)))


    meta2 = metadata.MetaData()
    nstars2 = 1000
    data = [Column(name='x', data=np.arange(0.01,(nstars2+0.01),1)),
            Column(name='y', data=np.arange(0.01,(nstars2+0.01),1))]
    setattr(meta2,'star_catalog',([],Table(data)))

    threshold = 0.02

    # TEST 1: Find closest-matching stars
    (idx1, idx2) = compare_xmatches.crossmatch_pixel_positions(meta1, meta2, log)

    # As the number of stars in the two catalogs isn't necessarily the same,
    # but the pixel positions are a monotonically increasing sequence,
    # the nearest match to the later stars in the longer catalog will be the
    # last star in the shorter catalog.
    assert( idx1[0:nstars2] == idx2[0:nstars2] ).all()
    assert( idx2[nstars2:] == nstars2-1 ).all()

    # TEST 2: Require less than allowed separation for a match:
    (idx1, idx2) = compare_xmatches.crossmatch_pixel_positions(meta1, meta2, log,
                                                                threshold)
    assert( idx1[0:nstars2] == idx2[0:nstars2] ).all()
    assert(len(idx1) == nstars2)

    logs.close_log(log)
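
# A minimal sketch of a pixel-position crossmatch consistent with the tests
# above: for every star in catalog 1, find its nearest neighbour in catalog 2,
# optionally rejecting pairs separated by more than a threshold (assumed
# implementation using scipy, not the compare_xmatches source).
import numpy as np
from scipy.spatial import cKDTree

def crossmatch_pixel_positions_sketch(meta1, meta2, log, threshold=None):
    cat1 = meta1.star_catalog[1]
    cat2 = meta2.star_catalog[1]
    tree = cKDTree(np.c_[cat2['x'], cat2['y']])
    # Distance to, and index of, the closest catalog-2 star for each catalog-1 star
    dist, idx2 = tree.query(np.c_[cat1['x'], cat1['y']], k=1)
    idx1 = np.arange(len(cat1))
    if threshold is not None:
        close = dist <= threshold
        idx1, idx2 = idx1[close], idx2[close]
    log.info('Crossmatched ' + str(len(idx1)) + ' stars by pixel position')
    return idx1, idx2
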
def run_stage0(setup):
    """Main driver function to run stage 0: data preparation.    
    The tasks of this stage are to ensure that all images are prepared for 
    reduction, and to make sure the reduction metadata is up to date.
    Input: setup - an instance of the ReductionSetup class. See
           reduction_control.py
    Output: prepares the metadata file
    """

    stage0_version = 'stage0 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage0', version=stage0_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find and update the pipeline config
    pipeline_config = read_the_config_file(setup.pipeline_config_dir, log=log)

    reduction_metadata = create_or_load_the_reduction_metadata(
        setup, setup.red_dir, metadata_name='pyDANDIA_metadata.fits', log=log)

    update_reduction_metadata_with_config_file(reduction_metadata,
                                               pipeline_config,
                                               log=log)

    # find all images

    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    # find and update the inst pipeline config

    image_name = all_images[0]

    inst_config_file_name = find_the_inst_config_file_name(
        setup,
        reduction_metadata,
        image_name,
        setup.pipeline_config_dir,
        image_index=0,
        log=None)

    inst_config = read_the_inst_config_file(setup.pipeline_config_dir,
                                            inst_config_file_name,
                                            log=log)
    update_reduction_metadata_with_inst_config_file(reduction_metadata,
                                                    inst_config,
                                                    log=log)

    # find the images that need to be processed, based on the metadata file; rerun_all=True forces a re-reduction

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=0, rerun_all=None, log=log)
    # create new rows on reduction status for new images

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=0, status=0, log=log)

    # construct the stamps if needed
    if not reduction_metadata.stamps[1]:

        open_image = open_an_image(
            setup,
            reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
            new_images[0],
            image_index=0,
            log=log)

        update_reduction_metadata_stamps(setup,
                                         reduction_metadata,
                                         open_image,
                                         stamp_size=None,
                                         arcseconds_stamp_size=(60, 60),
                                         pixel_scale=None,
                                         number_of_overlaping_pixels=25,
                                         log=log)

    if len(new_images) > 0:

        update_reduction_metadata_headers_summary_with_new_images(
            setup, reduction_metadata, new_images, log=log)

        set_bad_pixel_mask_directory(setup,
                                     reduction_metadata,
                                     bpm_directory_path=os.path.join(
                                         setup.red_dir, 'data'),
                                     log=log)

        logs.ifverbose(log, setup,
                       'Updating metadata with info on new images...')

        for new_image in new_images:
            open_image = open_an_image(
                setup,
                reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
                new_image,
                image_index=0,
                log=log)

            bad_pixel_mask = open_an_image(
                setup,
                reduction_metadata.data_architecture[1]['BPM_PATH'][0],
                new_image,
                image_index=2,
                log=log)

            # Occasionally, the LCO BANZAI pipeline fails to produce an image
            # catalogue for an image.  If this happens, there will only be 2
            # extensions to the FITS image HDU, the PrimaryHDU (main image data)
            # and the ImageHDU (BPM).
            if bad_pixel_mask is None:

                bad_pixel_mask = open_an_image(
                    setup,
                    reduction_metadata.data_architecture[1]['BPM_PATH'][0],
                    new_image,
                    image_index=1,
                    log=log)

            master_mask = construct_the_pixel_mask(open_image,
                                                   bad_pixel_mask, [1, 3],
                                                   saturation_level=65535,
                                                   low_level=0,
                                                   log=log)

            save_the_pixel_mask_in_image(reduction_metadata, new_image,
                                         master_mask)
            logs.ifverbose(log, setup, ' -> ' + new_image)

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=0, status=1, log=log)

    reduction_metadata.save_updated_metadata(
        reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],
        reduction_metadata.data_architecture[1]['METADATA_NAME'][0],
        log=log)

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report, reduction_metadata
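
# A minimal sketch of the master-mask construction used above: combine the
# instrument bad pixel mask with saturated and low-signal pixels into a single
# integer bitmask (assumed logic, not the exact construct_the_pixel_mask code).
import numpy as np

def construct_pixel_mask_sketch(image_data, bpm_data, saturation_level=65535, low_level=0):
    mask = np.zeros(image_data.shape, dtype=np.uint8)
    mask[bpm_data > 0] |= 1                    # instrumental bad pixels (BPM)
    mask[image_data >= saturation_level] |= 2  # saturated pixels
    mask[image_data <= low_level] |= 4         # dead / low-signal pixels
    return mask
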
def run_stage2(setup):
    """Main driver function to run stage 2: reference selection.

    This stage processes the metadata file, looks for the output of
    stage 0 and stage 1, and checks whether a reference file already
    exists.

    It creates a reference frame based on the selection criteria
    defined in the configuration. If no such configuration exists, it
    falls back to a standard configuration.

    If stage1 has failed to produce output it selects a reference
    based on header information.

    It always re-runs when called, since it is a lightweight function.
    """

    stage2_version = 'stage2 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage2', version=stage2_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    reduction_metadata = metadata.MetaData()

    # Load all metadata
    try:
        reduction_metadata.load_all_metadata(
            metadata_directory=setup.red_dir,
            metadata_name='pyDANDIA_metadata.fits')

        # Check data inventory on metadata
        log.info('stage2 has loaded the reduction metadata')
    except Exception as estr:
        log.info('Could not load metadata!' + repr(estr))
        status = 'FAILED'
        report = 'Loading metadata failed:' + repr(estr)
        return status, report

    try:
        n_images = len(reduction_metadata.images_stats)
    except AttributeError:
        log.info('stage2: data inventory missing.')
        status = 'FAILED'
        report = 'Data inventory (stage1) missing.'
        logs.close_log(log)
        return status, report

    # All parameters are part of metadata

    table_structure = [['IMAGE_NAME', 'MOON_STATUS', 'RANKING_KEY'],
                       ['S100', 'S100', 'float'], ['degree', None, None]]

    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    reduction_metadata.create_a_new_layer(layer_name='reference_inventory',
                                          data_structure=table_structure,
                                          data_columns=None)

    log.info('Created reference frame inventory table in metadata')

    # Iterate over images that are in the stage inventory

    reference_ranking = []

    fwhm_max = 0.
    for stats_entry in reduction_metadata.images_stats[1]:
        if float(stats_entry['FWHM_X']) > fwhm_max:
            fwhm_max = float(stats_entry['FWHM_X'])
        if float(stats_entry['FWHM_Y']) > fwhm_max:
            fwhm_max = float(stats_entry['FWHM_Y'])

    # taking filenames from headers_summary (stage1 change pending)
    filename_images = reduction_metadata.images_stats[1]['IM_NAME']
    data_image_directory = reduction_metadata.data_architecture[1][
        'IMAGES_PATH'][0]
    max_adu = float(reduction_metadata.reduction_parameters[1]['MAXVAL'][0])
    psf_size = int(
        4. * float(reduction_metadata.reduction_parameters[1]['KER_RAD'][0]) *
        fwhm_max)
    empirical_psf_flag = False
    if empirical_psf_flag:

        for stats_entry in reduction_metadata.images_stats[1]:
            image_filename = stats_entry[0]
            row_idx = np.where(reduction_metadata.images_stats[1]['IM_NAME'] ==
                               image_filename)[0][0]
            moon_status = 'dark'
            # to be reactivated as soon as it is part of metadata
            if ('MOONFKEY' in reduction_metadata.headers_summary[1].keys()
                    and 'MOONDKEY' in reduction_metadata.headers_summary[1].keys()):
                moon_status = moon_brightness_header(
                    reduction_metadata.headers_summary[1], row_idx)

            fwhm_arcsec = (float(stats_entry['FWHM_X'])**2 + float(
                stats_entry['FWHM_Y'])**2)**0.5 * float(
                    reduction_metadata.reduction_parameters[1]['PIX_SCALE'])
            # extract data inventory row for image and calculate sorting key
            # if a sufficient number of stars (>34) was detected at stage 1
            if int(stats_entry['NSTARS']) > 34 and fwhm_arcsec < 3. and (
                    'bright' not in moon_status):
                hdulist = fits.open(os.path.join(data_image_directory,
                                                 image_filename),
                                    memmap=True)
                image = hdulist[0].data
                ranking_key = empirical_psf_simple.empirical_snr_subframe(
                    image, psf_size, max_adu)
                hdulist.close()
                reference_ranking.append([image_filename, ranking_key])
                entry = [image_filename, moon_status, ranking_key]
                reduction_metadata.add_row_to_layer(
                    key_layer='reference_inventory', new_row=entry)

    else:
        for stats_entry in reduction_metadata.images_stats[1]:
            image_filename = stats_entry[0]
            row_idx = np.where(reduction_metadata.images_stats[1]['IM_NAME'] ==
                               image_filename)[0][0]
            moon_status = 'dark'
            # to be reactivated as soon as it is part of metadata
            if ('MOONFKEY' in reduction_metadata.headers_summary[1].keys()
                    and 'MOONDKEY' in reduction_metadata.headers_summary[1].keys()):
                moon_status = moon_brightness_header(
                    reduction_metadata.headers_summary[1], row_idx)

            fwhm_arcsec = (float(stats_entry['FWHM_X'])**2 + float(
                stats_entry['FWHM_Y'])**2)**0.5 * float(
                    reduction_metadata.reduction_parameters[1]['PIX_SCALE'])
            # extract data inventory row for image and calculate sorting key
            # if a sufficient number of stars (>34) was detected at stage 1
            if int(stats_entry['NSTARS']) > 34 and fwhm_arcsec < 3. and (
                    'bright' not in moon_status):
                ranking_key = add_stage1_rank(reduction_metadata, stats_entry)
                reference_ranking.append([image_filename, ranking_key])
                entry = [image_filename, moon_status, ranking_key]
                reduction_metadata.add_row_to_layer(
                    key_layer='reference_inventory', new_row=entry)

    # relax the selection criteria if no image passed the initial cuts
    if reference_ranking == []:
        for stats_entry in reduction_metadata.images_stats[1]:
            image_filename = stats_entry[0]
            row_idx = np.where(reduction_metadata.images_stats[1]['IM_NAME'] ==
                               image_filename)[0][0]
            moon_status = 'dark'
            # to be reactivated as soon as it is part of metadata
            if ('MOONFKEY' in reduction_metadata.headers_summary[1].keys()
                    and 'MOONDKEY' in reduction_metadata.headers_summary[1].keys()):
                moon_status = moon_brightness_header(
                    reduction_metadata.headers_summary[1], row_idx)

            fwhm_arcsec = (float(stats_entry['FWHM_X'])**2 + float(
                stats_entry['FWHM_Y'])**2)**0.5 * float(
                    reduction_metadata.reduction_parameters[1]['PIX_SCALE'])
            # extract data inventory row for image and calculate sorting key
            if int(stats_entry['NSTARS']) > 20 and fwhm_arcsec < 3.:
                ranking_key = add_stage1_rank(reduction_metadata, stats_entry)
                reference_ranking.append([image_filename, ranking_key])
                entry = [image_filename, moon_status, ranking_key]
                reduction_metadata.add_row_to_layer(
                    key_layer='reference_inventory', new_row=entry)

    # Save the updated layer to the metadata file
    reduction_metadata.save_a_layer_to_file(
        metadata_directory=setup.red_dir,
        metadata_name='pyDANDIA_metadata.fits',
        key_layer='reference_inventory')

    if reference_ranking != []:
        best_image = sorted(reference_ranking, key=itemgetter(1))[-1]
        ref_directory_path = os.path.join(setup.red_dir, 'ref')
        if not os.path.exists(ref_directory_path):
            os.mkdir(ref_directory_path)

        ref_img_path = os.path.join(
            str(reduction_metadata.data_architecture[1]['IMAGES_PATH'][0]),
            best_image[0])

        print('New reference ' + best_image[0] + ' in ' + ref_img_path)

        try:
            copyfile(
                os.path.join(
                    reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
                    best_image[0]),
                os.path.join(ref_directory_path, best_image[0]))
        except OSError:
            print('copy ref failed: ', best_image[0])

        if 'REF_PATH' not in reduction_metadata.data_architecture[1].keys():
            reduction_metadata.add_column_to_layer('data_architecture',
                                                   'REF_PATH',
                                                   [ref_directory_path],
                                                   new_column_format=None,
                                                   new_column_unit=None)
        else:
            reduction_metadata.update_a_cell_to_layer('data_architecture', 0,
                                                      'REF_PATH',
                                                      ref_directory_path)
        if 'REF_IMAGE' not in reduction_metadata.data_architecture[1].keys():
            reduction_metadata.add_column_to_layer(
                'data_architecture',
                'REF_IMAGE', [os.path.basename(ref_img_path)],
                new_column_format=None,
                new_column_unit=None)
        else:
            reduction_metadata.update_a_cell_to_layer(
                'data_architecture', 0, 'REF_IMAGE',
                os.path.basename(ref_img_path))
        # Update the REDUCTION_STATUS table in metadata for stage 2

        reduction_metadata.update_reduction_metadata_reduction_status(
            all_images, stage_number=1, status=1, log=log)
        reduction_metadata.save_updated_metadata(
            metadata_directory=setup.red_dir,
            metadata_name='pyDANDIA_metadata.fits')

        status = 'OK'
        report = 'Completed successfully'
        log.info('Updating metadata with info on new images...')
        logs.close_log(log)

        return status, report

    else:
        status = 'FAILED'
        report = 'No suitable image found.'

        log.info('No reference image found...')
        logs.close_log(log)

        return status, report
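
# Worked example of the reference selection above: sorting the ranking list by
# its second element (the ranking key) and taking the last entry picks the
# highest-ranked image.
from operator import itemgetter

ranking_example = [['img_a.fits', 12.3], ['img_b.fits', 45.6], ['img_c.fits', 7.8]]
best = sorted(ranking_example, key=itemgetter(1))[-1]
assert best[0] == 'img_b.fits'
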
def run_stage6(setup):
    """Main driver function to run stage 6: image substraction and photometry.
    This stage align the images to the reference frame!
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], the stage4 status, the report, the metadata file
    :rtype: array_like

    """

    stage6_version = 'stage6 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage6', version=stage6_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir, 'pyDANDIA_metadata.fits')

    # find the images that need to be processed
    all_images = reduction_metadata.find_all_images(setup, reduction_metadata,
                                                    os.path.join(setup.red_dir, 'data'), log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(setup, all_images,
                                                                   stage_number=6, rerun_all=None, log=log)

    # find the starlist
    starlist =  reduction_metadata.star_catalog[1]     

    max_x = np.max(starlist['x_pixel'].data)
    max_y = np.max(starlist['y_pixel'].data)
    mask = ((starlist['psf_star'].data == 1)
            & (starlist['x_pixel'].data < max_x - 25) & (starlist['x_pixel'].data > 25)
            & (starlist['y_pixel'].data < max_y - 25) & (starlist['y_pixel'].data > 25))

    control_stars = starlist[mask][:10]
    star_coordinates = np.c_[control_stars['star_index'].data,
                             control_stars['x_pixel'].data,
                             control_stars['y_pixel'].data]

    # Assemble the reference star catalog as a 2D array with one column per
    # star_catalog column
    for index, key in enumerate(starlist.columns.keys()):

        if index != 0:
            ref_star_catalog = np.c_[ref_star_catalog, starlist[key].data]
        else:
            ref_star_catalog = starlist[key].data


    psf_model = fits.open(os.path.join(
        reduction_metadata.data_architecture[1]['REF_PATH'].data[0],
        'psf_model.fits'))

    psf_type = psf_model[0].header['PSFTYPE']
    psf_parameters = [0, psf_model[0].header['Y_CENTER'],
                      psf_model[0].header['X_CENTER'],
                      psf_model[0].header['GAMMA'],
                      psf_model[0].header['ALPHA']]       
    
 
    sky_model = sky_background.model_sky_background(setup,
                                        reduction_metadata,log,ref_star_catalog)


    psf_model = psf.get_psf_object( psf_type )
    psf_model.update_psf_parameters( psf_parameters)

    # Leftover debugging: index of the star closest to pixel (150, 150)
    #ind = ((starlist['x_pixel']-150)**2<1) & ((starlist['y_pixel']-150)**2<1)
    #print (np.argmin(((starlist['x_pixel']-150)**2) + ((starlist['y_pixel']-150)**2)))
    if len(new_images) > 0:

        # find the reference image
        try:
            reference_image_name = reduction_metadata.data_architecture[1]['REF_IMAGE'].data[0]
            reference_image_directory = reduction_metadata.data_architecture[1]['REF_PATH'].data[0]
            reference_image,date = open_an_image(setup, reference_image_directory, reference_image_name, image_index=0,
                                            log=None)
                                            
            ref_image_name = reduction_metadata.data_architecture[1]['REF_IMAGE'].data[0]
            index_reference = np.where(ref_image_name == reduction_metadata.headers_summary[1]['IMAGES'].data)[0][0]
            ref_exposure_time = float(reduction_metadata.headers_summary[1]['EXPKEY'].data[index_reference])
   
            logs.ifverbose(log, setup,
                           'I found the reference frame:' + reference_image_name)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I cannot find any reference image! Abort stage6')

            status = 'KO'
            report = 'No reference frame found!'

            return status, report

        # find the kernels directory
        try:

            kernels_directory = reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'].data[0]+'kernel/'

            logs.ifverbose(log, setup,
                           'I found the kernels directory:' + kernels_directory)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I cannot find the kernels directory! Abort stage6')

            status = 'KO'
            report = 'No kernels directory found!'

            return status, report

        data = []
        diffim_directory = reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'].data[0]+'diffim/'
        images_directory = reduction_metadata.data_architecture[1]['IMAGES_PATH'].data[0]
        phot = np.zeros((len(new_images),len(ref_star_catalog),16))
        time = []
        for idx,new_image in enumerate(new_images):

            log.info('Starting difference photometry of '+new_image)
            target_image,date = open_an_image(setup, images_directory, new_image, image_index=0, log=None)
            kernel_image,kernel_error,kernel_bkg = find_the_associated_kernel(setup, kernels_directory, new_image)
         
            difference_image = image_substraction(setup, reduction_metadata,reference_image, kernel_image, new_image)-kernel_bkg
         

            time.append(date)

            save_control_stars_of_the_difference_image(setup, new_image, difference_image, star_coordinates)

            photometric_table, control_zone = photometry_on_the_difference_image(setup, reduction_metadata, log,ref_star_catalog,difference_image,  psf_model, sky_model, kernel_image,kernel_error, ref_exposure_time)
         
            phot[idx,:,:] = photometric_table

            #save_control_zone_of_residuals(setup, new_image, control_zone)

            #ingest_photometric_table_in_db(setup, photometric_table)

    # Diagnostic plot of the difference photometry for stars near pixel (150, 150)
    import matplotlib.pyplot as plt
    ind = ((starlist['x_pixel']-150)**2 < 1) & ((starlist['y_pixel']-150)**2 < 1)
    plt.errorbar(time, phot[:, ind, 8], fmt='.k')
    plt.show()

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report
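
# A minimal sketch of PSF-weighted photometry on a difference-image stamp, in
# the spirit of photometry_on_the_difference_image above: for a known,
# normalised PSF model p, data d and per-pixel variance v, the least-squares
# flux is f = sum(d*p/v) / sum(p*p/v).  Assumed formulation, for illustration.
import numpy as np

def difference_flux_sketch(stamp, psf_stamp, variance):
    weights = psf_stamp / variance
    flux = np.sum(stamp * weights) / np.sum(psf_stamp * weights)
    flux_err = 1.0 / np.sqrt(np.sum(psf_stamp * weights))
    return flux, flux_err
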
def run_stage5(setup):
    """Main driver function to run stage 5: kernel_solution
    This stage finds the kernel solution and (optionally) subtracts the model
    image
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], stage5 status, report, 
     metadata file
    :rtype: array_like
    """

    stage5_version = 'stage5 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage5', version=stage5_version)
    log.info('Setup:\n' + setup.summary() + '\n')
    try:
        from umatrix_routine import umatrix_construction, umatrix_bvector_construction, bvector_construction

    except ImportError:
        log.info(
            'Uncompiled cython code, please run setup.py: e.g.\n python setup.py build_ext --inplace'
        )
        status = 'KO'
        report = 'Uncompiled cython code, please run setup.py: e.g.\n python setup.py build_ext --inplace'
        return status, report

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir,
                                         'pyDANDIA_metadata.fits')

    #determine kernel size based on maximum FWHM
    fwhm_max = 0.
    shift_max = 0
    for stats_entry in reduction_metadata.images_stats[1]:
        if float(stats_entry['FWHM_X']) > fwhm_max:
            fwhm_max = float(stats_entry['FWHM_X'])
        if float(stats_entry['FWHM_Y']) > fwhm_max:
            fwhm_max = float(stats_entry['FWHM_Y'])
        if abs(float(stats_entry['SHIFT_X'])) > shift_max:
            shift_max = abs(float(stats_entry['SHIFT_X']))
        if abs(float(stats_entry['SHIFT_Y'])) > shift_max:
            shift_max = abs(float(stats_entry['SHIFT_Y']))
    maxshift = int(shift_max) + 2
    # assume a small-format image (smaller than or equal to 500x500 pixels)
    large_format_image = False

    sigma_max = fwhm_max / (2. * (2. * np.log(2.))**0.5)
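    # For a Gaussian profile, FWHM = 2*sqrt(2 ln 2)*sigma (about 2.3548*sigma),
    # so sigma_max is the Gaussian width corresponding to the largest FWHM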
    # The factor 4 corresponds to the kernel radius of 2*FWHM used in the old pipeline
    kernel_size = int(
        4. * float(reduction_metadata.reduction_parameters[1]['KER_RAD'][0]) *
        fwhm_max)
    # ensure the kernel size is odd
    if kernel_size and kernel_size % 2 == 0:
        kernel_size = kernel_size + 1
    # find the images that need to be processed
    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=5, rerun_all=None, log=log)

    kernel_directory_path = os.path.join(setup.red_dir, 'kernel')
    diffim_directory_path = os.path.join(setup.red_dir, 'diffim')
    if not os.path.exists(kernel_directory_path):
        os.mkdir(kernel_directory_path)
    if not os.path.exists(diffim_directory_path):
        os.mkdir(diffim_directory_path)
    reduction_metadata.update_column_to_layer('data_architecture',
                                              'KERNEL_PATH',
                                              kernel_directory_path)
    # difference images are written for verbosity level > 0
    reduction_metadata.update_column_to_layer('data_architecture',
                                              'DIFFIM_PATH',
                                              diffim_directory_path)
    data_image_directory = reduction_metadata.data_architecture[1][
        'IMAGES_PATH'][0]
    ref_directory_path = '.'
    #For a quick image subtraction, pre-calculate a sufficiently large u_matrix
    #based on the largest FWHM and store it to disk -> needs config switch

    try:
        reference_image_name = str(
            reduction_metadata.data_architecture[1]['REF_IMAGE'][0])
        reference_image_directory = str(
            reduction_metadata.data_architecture[1]['REF_PATH'][0])
        max_adu = 0.3 * float(
            reduction_metadata.reduction_parameters[1]['MAXVAL'][0])
        ref_row_index = np.where(
            reduction_metadata.images_stats[1]['IM_NAME'] == str(
                reduction_metadata.data_architecture[1]['REF_IMAGE'][0]))[0][0]
        ref_fwhm_x = reduction_metadata.images_stats[1][ref_row_index][
            'FWHM_X']
        ref_fwhm_y = reduction_metadata.images_stats[1][ref_row_index][
            'FWHM_Y']
        ref_sigma_x = ref_fwhm_x / (2. * (2. * np.log(2.))**0.5)
        ref_sigma_y = ref_fwhm_y / (2. * (2. * np.log(2.))**0.5)
        ref_stats = [ref_fwhm_x, ref_fwhm_y, ref_sigma_x, ref_sigma_y]
        logs.ifverbose(log, setup,
                       'Using reference image:' + reference_image_name)
    except Exception as e:
        logs.ifverbose(log, setup, 'No reference image found! Abort stage5: ' + str(e))
        status = 'KO'
        report = 'No reference image found!'
        return status, report, reduction_metadata

    if not (('SHIFT_X' in reduction_metadata.images_stats[1].keys()) and
            ('SHIFT_Y' in reduction_metadata.images_stats[1].keys())):
        logs.ifverbose(log, setup, 'No x/y shifts found! Run stage4 first! Abort stage5')
        status = 'KO'
        report = 'No alignment data found!'
        return status, report, reduction_metadata

    if not large_format_image:
        subtract_small_format_image(new_images,
                                    reference_image_name,
                                    reference_image_directory,
                                    reduction_metadata,
                                    setup,
                                    data_image_directory,
                                    kernel_size,
                                    max_adu,
                                    ref_stats,
                                    maxshift,
                                    kernel_directory_path,
                                    diffim_directory_path,
                                    log=log)
    #append some metric for the kernel, perhaps its scale factor...
    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=5, status=1, log=log)
    logs.close_log(log)
    status = 'OK'
    report = 'Completed successfully'

    return status, report
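
# A minimal sketch of the kernel solution that stage 5 delegates to the
# compiled umatrix/bvector routines: with a delta-function kernel basis each
# kernel pixel is a free coefficient, and minimising |B a - t|^2 between the
# shifted-reference columns B and the target t gives the normal equations
# U a = b with U = B^T B and b = B^T t (assumed formulation, plain numpy).
import numpy as np

def solve_kernel_sketch(reference, target, kernel_size):
    half = kernel_size // 2
    columns = []
    for dy in range(-half, half + 1):
        for dx in range(-half, half + 1):
            # Each basis vector is the reference image shifted by (dy, dx)
            columns.append(np.roll(reference, (dy, dx), axis=(0, 1)).ravel())
    basis = np.array(columns).T            # shape (npixels, kernel_size**2)
    u_matrix = basis.T @ basis
    b_vector = basis.T @ target.ravel()
    coefficients = np.linalg.solve(u_matrix, b_vector)
    return coefficients.reshape(kernel_size, kernel_size)
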
def run_stage4(setup):
    """Main driver function to run stage 4: image alignement.
    This stage align the images to the reference frame!
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report], the stage4 status and the report
    :rtype: array_like

    """

    stage4_version = 'stage4 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage4', version=stage4_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir,
                                         'pyDANDIA_metadata.fits')

    # find the images that need to be processed
    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=4, rerun_all=None, log=log)

    if len(new_images) > 0:

        # find the reference image
        try:
            reference_image_name = reduction_metadata.data_architecture[1][
                'REF_IMAGE'].data[0]
            reference_image_directory = reduction_metadata.data_architecture[
                1]['REF_PATH'].data[0]
            reference_image = open_an_image(setup,
                                            reference_image_directory,
                                            reference_image_name,
                                            image_index=0,
                                            log=None)
            logs.ifverbose(
                log, setup,
                'I found the reference frame:' + reference_image_name)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I cannot find any reference image! Abort stage4')

            status = 'KO'
            report = 'No reference frame found!'

            return status, report

        data = []
        images_directory = reduction_metadata.data_architecture[1][
            'IMAGES_PATH'].data[0]
        for new_image in new_images:
            target_image = open_an_image(setup,
                                         images_directory,
                                         new_image,
                                         image_index=0,
                                         log=None)

            try:
                x_new_center, y_new_center, x_shift, y_shift = find_x_y_shifts_from_the_reference_image(
                    setup,
                    reference_image,
                    target_image,
                    edgefraction=0.5,
                    log=None)

                data.append([new_image, x_shift, y_shift])
                logs.ifverbose(
                    log, setup,
                    'I found the image translation to the reference for frame:'
                    + new_image)

            except Exception:

                logs.ifverbose(
                    log, setup,
                    'I cannot find the image translation to the reference for frame:'
                    + new_image + '. Abort stage4!')

                status = 'KO'
                report = 'No shift found for image:' + new_image + ' !'

                return status, report

        if ('SHIFT_X' in reduction_metadata.images_stats[1].keys()) and (
                'SHIFT_Y' in reduction_metadata.images_stats[1].keys()):

            for index in range(len(data)):
                target_image = data[index][0]
                x_shift = data[index][1]
                y_shift = data[index][2]
                row_index = np.where(reduction_metadata.images_stats[1]
                                     ['IM_NAME'].data == target_image)[0][0]
                reduction_metadata.update_a_cell_to_layer(
                    'images_stats', row_index, 'SHIFT_X', x_shift)
                reduction_metadata.update_a_cell_to_layer(
                    'images_stats', row_index, 'SHIFT_Y', y_shift)
                logs.ifverbose(log, setup,
                               'Updated metadata for image: ' + target_image)
        else:
            logs.ifverbose(log, setup,
                           'I have to construct SHIFT_X and SHIFT_Y columns')

            # np.copy of a mixed [name, x, y] list yields a string array, so
            # the shift columns are cast back to numeric type before storage
            sorted_data = np.copy(data)

            for index in range(len(data)):
                target_image = data[index][0]

                row_index = np.where(reduction_metadata.images_stats[1]
                                     ['IM_NAME'].data == target_image)[0][0]

                sorted_data[row_index] = data[index]

            column_format = 'float'
            column_unit = 'pix'
            reduction_metadata.add_column_to_layer(
                'images_stats',
                'SHIFT_X',
                sorted_data[:, 1].astype(float),
                new_column_format=column_format,
                new_column_unit=column_unit)

            reduction_metadata.add_column_to_layer(
                'images_stats',
                'SHIFT_Y',
                sorted_data[:, 2].astype(float),
                new_column_format=column_format,
                new_column_unit=column_unit)

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=4, status=1, log=log)

    reduction_metadata.save_updated_metadata(
        reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],
        reduction_metadata.data_architecture[1]['METADATA_NAME'][0],
        log=log)

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report
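
# A minimal sketch of measuring an x/y translation between two frames, in the
# spirit of find_x_y_shifts_from_the_reference_image above (assumed method:
# FFT cross-correlation; the pipeline's own routine may differ).
import numpy as np

def find_xy_shift_sketch(reference, target):
    # Cross-correlate via the Fourier domain and locate the correlation peak
    xcorr = np.fft.ifft2(np.fft.fft2(reference) * np.conj(np.fft.fft2(target))).real
    ny, nx = xcorr.shape
    peak_y, peak_x = np.unravel_index(np.argmax(xcorr), xcorr.shape)
    # Peaks beyond half the frame size correspond to negative shifts
    y_shift = peak_y if peak_y <= ny // 2 else peak_y - ny
    x_shift = peak_x if peak_x <= nx // 2 else peak_x - nx
    return float(x_shift), float(y_shift)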