def compare_catalog_xmatch_between_reductions():

    params = get_args()

    log = logs.start_stage_log(params['log_dir'], 'compare_crossmatch')

    meta1 = metadata.MetaData()
    meta1.load_all_metadata(params['red_dir1'], 'pyDANDIA_metadata.fits')
    log.info('Loaded metadata from ' + params['red_dir1'])
    meta2 = metadata.MetaData()
    meta2.load_all_metadata(params['red_dir2'], 'pyDANDIA_metadata.fits')
    log.info('Loaded metadata from ' + params['red_dir2'])

    # Crossmatch the catalogs by x, y pixel positions
    (meta1_matches,
     meta2_matches) = crossmatch_pixel_positions_per_star(meta1,
                                                          meta2,
                                                          log,
                                                          threshold=1.0)

    matched_data = build_matched_arrays(meta1, meta2, meta1_matches,
                                        meta2_matches, log)

    matched_data = calculate_separations_on_sky(matched_data, log)

    # For each matching star, compare RA, Dec and Gaia ID if available
    compare_coordinates(params, matched_data, log)
    compare_gaia_ids(params, matched_data, log)

    logs.close_log(log)
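
# As a hypothetical illustration, a minimal sketch of the kind of check
# compare_gaia_ids could perform on the gaia_id1/gaia_id2 columns of the
# matched table; the real pyDANDIA implementation may differ.
def compare_gaia_ids(params, matched_data, log):
    mismatched = matched_data['gaia_id1'] != matched_data['gaia_id2']
    n_bad = int(mismatched.sum())
    log.info('%d of %d matched stars have differing Gaia IDs'
             % (n_bad, len(matched_data)))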
def test_calculate_separations_on_sky():

    log = logs.start_stage_log( '.', 'test_compare_crossmatch' )

    nstars = 10
    id_start = 405644951584364160

    data = [ Column(name='x1', data=np.arange(1.0,float(nstars+1),1.0)),
             Column(name='y1', data=np.arange(1.0,float(nstars+1),1.0)),
             Column(name='ra1', data=np.linspace(250.0, 265.0, nstars)),
             Column(name='dec1', data=np.linspace(-25.0, -28.0, nstars)),
             Column(name='gaia_id1', data=np.arange(id_start, id_start+nstars, 1)),

             Column(name='x2', data=np.arange(2.0,float(nstars+2),1.0)),
             Column(name='y2', data=np.arange(2.0,float(nstars+2),1.0)),
             Column(name='ra2', data=np.linspace(250.001, 265.001, nstars)),
             Column(name='dec2', data=np.linspace(-25.001, -28.001, nstars)),
             Column(name='gaia_id2', data=np.arange(id_start, id_start+nstars, 1)),

             Column(name='separation', data=np.zeros(nstars)) ]
    matched_data = Table(data)

    matched_data = compare_xmatches.calculate_separations_on_sky(matched_data, log)

    assert( (matched_data['separation'] != 0.0).all() )

    logs.close_log(log)
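
# A minimal sketch of calculate_separations_on_sky as exercised by the test
# above, assuming astropy is available; the real pyDANDIA implementation may
# differ in detail.
from astropy.coordinates import SkyCoord
import astropy.units as u

def calculate_separations_on_sky(matched_data, log):
    """Fill the 'separation' column with on-sky separations in arcsec."""
    coords1 = SkyCoord(ra=matched_data['ra1'], dec=matched_data['dec1'],
                       unit=(u.deg, u.deg))
    coords2 = SkyCoord(ra=matched_data['ra2'], dec=matched_data['dec2'],
                       unit=(u.deg, u.deg))
    matched_data['separation'] = coords1.separation(coords2).to(u.arcsec).value
    log.info('Calculated on-sky separations for %d matched stars'
             % len(matched_data))
    return matched_data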
def combine_colour_datasets():
    """Function to plot colour magnitude and colour-colour plots"""
    
    datasets = {'ip': None, 'rp': None, 'gp': None,
                'ip_images': None, 'rp_images': None, 'gp_images': None}
    
    params = get_args()
    
    log = logs.start_stage_log( params['red_dir'], 'combine_colour_datasets', version=VERSION )
    
    for f in ['ip', 'rp', 'gp']:

        if params[f] is not None:

            (datasets[f],datasets[f+'_images']) = extract_star_catalog(params,f,log)

    # Colour plots require data in at least two passbands
    if [datasets[f] for f in ['ip', 'rp', 'gp']].count(None) >= 2:

        log.info('ERROR: Data available for only 1 passband, cannot produce figures')
        logs.close_log(log)

        exit()
        
    (combined_catalog,col_names,formats,units,f1,f2,f3) = combine_star_catalogs(datasets,log)
    
    image_trios = identify_image_trios(params,datasets,log)
    
    output_combined_catalog(combined_catalog,col_names,formats,units,f1,f2,f3,
                            image_trios,params,log)
    
    logs.close_log(log)
def calibrate_photometry():
    """Function to calculate the photometric transform between the instrumental
    magnitudes produced by the pyDANDIA pipeline and catalog data."""

    params = get_args()

    setup = pipeline_setup.pipeline_setup(params)

    log = logs.start_stage_log(setup.red_dir, 'phot_calib', version=VERSION)

    (reduction_metadata, params,
     star_catalog) = fetch_metadata(setup, params, log)

    star_catalog = select_good_detected_stars(star_catalog, params, log)

    vphas_cat = fetch_catalog_sources_within_image(params, log)

    vphas_cat = select_calibration_stars(vphas_cat, params, log)

    catalog_file = os.path.join(params['red_dir'], 'vphas_catalog.fits')

    match_index = match_stars_by_position(star_catalog, vphas_cat, log)

    #    catalog_utils.output_vphas_catalog_file(catalog_file,vphas_cat,match_index=match_index)

    fit = calc_phot_calib(params, star_catalog, vphas_cat, match_index, log)

    star_catalog = apply_phot_calib(star_catalog, fit, log)

    output_to_metadata(star_catalog, reduction_metadata, vphas_cat,
                       match_index, setup, params, log)
    logs.close_log(log)
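
# A hypothetical sketch of positional matching such as match_stars_by_position
# performs, assuming both catalogs carry 'RA'/'DEC' columns in degrees and that
# match_index pairs detected-star indices with catalog indices; the real
# pyDANDIA column names, tolerance, and return format may differ.
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u

def match_stars_by_position(star_catalog, vphas_cat, log, tolerance=1.0 * u.arcsec):
    stars = SkyCoord(ra=star_catalog['RA'], dec=star_catalog['DEC'],
                     unit=(u.deg, u.deg))
    cal_stars = SkyCoord(ra=vphas_cat['RA'], dec=vphas_cat['DEC'],
                         unit=(u.deg, u.deg))
    # nearest neighbour on the sky for every detected star
    idx, sep2d, _ = stars.match_to_catalog_sky(cal_stars)
    matched = sep2d <= tolerance
    match_index = np.column_stack((np.where(matched)[0], idx[matched]))
    log.info('Matched %d stars against the VPHAS catalog' % len(match_index))
    return match_index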
def reset_data_dirs(
        config_file='/home/ytsapras/test_data/data_reduction/config/auto_pipeline_config.json',
        verbose=False):
    '''This function will perform a recursive sub-directory search and reset each
       dataset directory for a new reduction from scratch. In effect, it will remove
       everything from the data directory except the directory itself and the data/
       folder, which contains the images. !!!USE WITH CAUTION!!!

    '''
    # Get configuration file
    config = config_utils.build_config_from_json(config_file)
    path_to_top_reduction_directory = config['data_red_dir']

    # Collect all subdir names, excluding logs/ and config/
    all_sub_directories = [
        subdir for subdir in glob(path.join(path_to_top_reduction_directory, '*/'))
        if path.basename(path.normpath(subdir)) not in ('logs', 'config')]

    # Keep a log
    log = logs.start_pipeline_log(config['log_dir'], 'reset_reduction')

    # Loop over all subdirs removing everything except the data/ folders
    for subdir in all_sub_directories:
        contents = glob(path.join(subdir, '*'))
        # Preserve the data/ folder, if present
        data_dir = path.join(subdir, 'data')
        if data_dir in contents:
            contents.remove(data_dir)
        for item in contents:
            if path.isdir(item):
                try:
                    shutil.rmtree(item)
                    log.info("Removed folder: %s" % item)
                    if verbose:
                        print("Removed folder: %s" % item)
                except OSError:
                    log.info("Unable to remove folder: %s" % item)
                    if verbose:
                        print("Unable to remove folder: %s" % item)
                        print("Check read/write folder permissions.")
            elif path.exists(item):
                try:
                    remove(item)
                    log.info("Removed file: %s" % item)
                    if verbose:
                        print("Removed file: %s" % item)
                except OSError:
                    log.info("Unable to remove file: %s" % item)
                    if verbose:
                        print("Unable to remove file: %s" % item)
                        print("Check read/write file permissions.")

    logs.close_log(log)
    return 1
def compare_star_catalogs():
    """Function to compare the photometry from two different star catalogs"""
    
    params = get_args()
    
    log = logs.start_stage_log( '.', 'compare_star_catalogs' )
    
    star_cat1 = extract_star_catalog(params,'cat1',log)
    star_cat2 = extract_star_catalog(params,'cat2',log)

    match_index = cross_match_stars(star_cat1,star_cat2,log)
    
    plot_matched_ref_photometry(star_cat1,star_cat2,match_index,log)
    
    logs.close_log(log)
def reset_pydandia_reductions():

    params = get_args()

    config = config_utils.build_config_from_json(params['config_file'])

    log = logs.start_pipeline_log(config['log_dir'], 'dataset_reset')

    running_processes = automatic_pipeline.read_process_list(config,log)

    datasets = get_dataset_list(params,log)

    if params['reset_code'] == 'stage2_no_ref':
        reset_stage2_no_ref(params, datasets, log)

    else:
        print('Unrecognized reset code.  No action taken.')

    log.info('Completed reset process')
    logs.close_log(log)
def test_crossmatch_pixel_positions():

    log = logs.start_stage_log( '.', 'test_compare_crossmatch' )

    meta1 = metadata.MetaData()
    nstars1 = 20000    # Must be greater than nstars2
    data = [Column(name='x', data=np.arange(0,nstars1,1)),
            Column(name='y', data=np.arange(0,nstars1,1))]
    setattr(meta1,'star_catalog',([],Table(data)))


    meta2 = metadata.MetaData()
    nstars2 = 1000
    data = [Column(name='x', data=np.arange(0.01,(nstars2+0.01),1)),
            Column(name='y', data=np.arange(0.01,(nstars2+0.01),1))]
    setattr(meta2,'star_catalog',([],Table(data)))

    threshold = 0.02

    # TEST 1: Find closest-matching stars
    (idx1, idx2) = compare_xmatches.crossmatch_pixel_positions(meta1, meta2, log)

    # As the number of stars in the two catalogs isn't necessarily the same,
    # but the pixel positions are a monotonically increasing sequence,
    # the nearest match to the later stars in the longer catalog will be the
    # last star in the shorter catalog.
    assert( idx1[0:nstars2] == idx2[0:nstars2] ).all()
    assert( idx2[nstars2:] == nstars2-1 ).all()

    # TEST 2: Require less than allowed separation for a match:
    (idx1, idx2) = compare_xmatches.crossmatch_pixel_positions(meta1, meta2, log,
                                                                threshold)
    assert( idx1[0:nstars2] == idx2[0:nstars2] ).all()
    assert(len(idx1) == nstars2)

    logs.close_log(log)
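
# A minimal sketch of crossmatch_pixel_positions consistent with the tests
# above, assuming scipy is available; the real pyDANDIA implementation may
# differ. Each star in the first catalog is matched to its nearest neighbour
# in pixel space, and an optional threshold (in pixels) rejects distant pairs.
import numpy as np
from scipy.spatial import cKDTree

def crossmatch_pixel_positions(meta1, meta2, log, threshold=None):
    stars1 = meta1.star_catalog[1]
    stars2 = meta2.star_catalog[1]
    tree = cKDTree(np.column_stack((stars2['x'], stars2['y'])))
    seps, idx2 = tree.query(np.column_stack((stars1['x'], stars1['y'])), k=1)
    idx1 = np.arange(len(stars1))
    if threshold is not None:
        close = seps <= threshold
        idx1, idx2 = idx1[close], idx2[close]
    log.info('Crossmatched %d stars by pixel position' % len(idx1))
    return idx1, idx2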
def run_starfind(setup, reduction_metadata):
    """
    Function to enable starfind to be run from the commandline
    
    :param object setup: this is an instance of the ReductionSetup class. See
                         reduction_control.py
    :param object reduction_metadata: the metadata object
    
    :return status, report: two strings reporting whether the stage was 
                            completed successfully
    :rtype string, string
    """

    params = {}

    params['ref_image'] = input(
        'Please enter the path to the image to be analyzed: ')
    opt = input(
        'Do you want diagnostic plots output? T or F [default: F]: ')
    if 'T' in str(opt).upper():
        params['plot'] = True
    else:
        params['plot'] = False

    # red_dir was never set above; prompt for it, since the log is written there
    params['red_dir'] = input(
        'Please enter the path to the reduction directory: ')

    log = logs.start_pipeline_log(params['red_dir'], 'starfind')

    setup = pipeline_setup(params)

    (status, report, params) = starfind(setup,
                                        params['ref_image'],
                                        reduction_metadata,
                                        plot_it=params['plot'],
                                        log=log)
    logs.close_log(log)

    return status, report
def run_stage0(setup):
    """Main driver function to run stage 0: data preparation.    
    The tasks of this stage are to ensure that all images are prepared for 
    reduction, and to make sure the reduction metadata is up to date.
    Input: setup - is an instance of the ReductionSetup class. See 
           reduction_control.py
    Output: prepares the metadata file
    """

    stage0_version = 'stage0 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage0', version=stage0_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find and update the pipeline config
    pipeline_config = read_the_config_file(setup.pipeline_config_dir, log=log)

    reduction_metadata = create_or_load_the_reduction_metadata(
        setup, setup.red_dir, metadata_name='pyDANDIA_metadata.fits', log=log)

    update_reduction_metadata_with_config_file(reduction_metadata,
                                               pipeline_config,
                                               log=log)

    # find all images

    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    # find and update the inst pipeline config

    image_name = all_images[0]

    inst_config_file_name = find_the_inst_config_file_name(
        setup,
        reduction_metadata,
        image_name,
        setup.pipeline_config_dir,
        image_index=0,
        log=None)

    inst_config = read_the_inst_config_file(setup.pipeline_config_dir,
                                            inst_config_file_name,
                                            log=log)
    update_reduction_metadata_with_inst_config_file(reduction_metadata,
                                                    inst_config,
                                                    log=log)

    # find images that need to be processed, based on the metadata file, if any. If rerun_all = True, force a re-reduction

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=0, rerun_all=None, log=log)
    # create new rows on reduction status for new images

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=0, status=0, log=log)

    # construct the stamps if needed
    if not reduction_metadata.stamps[1]:

        open_image = open_an_image(
            setup,
            reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
            new_images[0],
            image_index=0,
            log=log)

        update_reduction_metadata_stamps(setup,
                                         reduction_metadata,
                                         open_image,
                                         stamp_size=None,
                                         arcseconds_stamp_size=(60, 60),
                                         pixel_scale=None,
                                         number_of_overlaping_pixels=25,
                                         log=log)

    if len(new_images) > 0:

        update_reduction_metadata_headers_summary_with_new_images(
            setup, reduction_metadata, new_images, log=log)

        set_bad_pixel_mask_directory(setup,
                                     reduction_metadata,
                                     bpm_directory_path=os.path.join(
                                         setup.red_dir, 'data'),
                                     log=log)

        logs.ifverbose(log, setup,
                       'Updating metadata with info on new images...')

        for new_image in new_images:
            open_image = open_an_image(
                setup,
                reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
                new_image,
                image_index=0,
                log=log)

            bad_pixel_mask = open_an_image(
                setup,
                reduction_metadata.data_architecture[1]['BPM_PATH'][0],
                new_image,
                image_index=2,
                log=log)

            # Occasionally, the LCO BANZAI pipeline fails to produce an image
            # catalogue for an image.  If this happens, there will only be 2
            # extensions to the FITS image HDU, the PrimaryHDU (main image data)
            # and the ImageHDU (BPM).
            if bad_pixel_mask is None:

                bad_pixel_mask = open_an_image(
                    setup,
                    reduction_metadata.data_architecture[1]['BPM_PATH'][0],
                    new_image,
                    image_index=1,
                    log=log)

            master_mask = construct_the_pixel_mask(open_image,
                                                   bad_pixel_mask, [1, 3],
                                                   saturation_level=65535,
                                                   low_level=0,
                                                   log=log)

            save_the_pixel_mask_in_image(reduction_metadata, new_image,
                                         master_mask)
            logs.ifverbose(log, setup, ' -> ' + new_image)

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=0, status=1, log=log)

    reduction_metadata.save_updated_metadata(
        reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],
        reduction_metadata.data_architecture[1]['METADATA_NAME'][0],
        log=log)

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report, reduction_metadata
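
# A hypothetical sketch of the masking logic that construct_the_pixel_mask
# applies, simplified to plain numpy arrays; the real function also handles
# HDU objects, the list of mask codes, and logging.
import numpy as np

def construct_pixel_mask(image_data, bpm_data, saturation_level=65535,
                         low_level=0):
    master_mask = np.zeros(image_data.shape, dtype=np.int32)
    master_mask[image_data >= saturation_level] = 1  # saturated pixels
    master_mask[image_data <= low_level] = 1         # dead or low-value pixels
    if bpm_data is not None:
        master_mask[bpm_data > 0] = 1                # known bad pixels from the BPM
    return master_mask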
def clear_locks(
        config_file='/home/ytsapras/test_data/data_reduction/config/auto_pipeline_config.json',
        verbose=False):
    '''This function will perform a recursive sub-directory search and safely remove
       any dataset.lock files if the following conditions are met:
       (Note: The program will avoid all currently active reductions.)
       --> The highest-numbered stage executed is stage 2, and there are no logs for
           reference_astrometry or stage 3.
       --> There is no .fits image in the ref/ sub-directory, which may or may not exist.
       --> The last entry of stage2.log indicates that stage 2 is complete
           ("Processing complete").
       --> A dataset.lock file is present.
       If these conditions hold, the code removes the dataset.lock file from the
       reduction directory.

    '''
    # Get configuration file
    config = config_utils.build_config_from_json(config_file)
    path_to_top_reduction_directory = config['data_red_dir']

    # Collect all subdir names, excluding logs/ and config/
    all_sub_directories = [
        subdir for subdir in glob(path.join(path_to_top_reduction_directory, '*/'))
        if path.basename(path.normpath(subdir)) not in ('logs', 'config')]

    # Identify currently active reductions in the list of subdirectories
    log = logs.start_pipeline_log(config['log_dir'], 'clear_lock')
    active_reductions = automatic_pipeline.read_process_list(config, log)

    # For each remaining subdirectory, check conditions and
    # remove dataset.lock file if conditions are met
    for subdir in all_sub_directories:
        # Define conditions
        condition1 = False  # Is the highest-number stage executed, stage 2?
        condition2 = False  # Is there a fits image in ref/ ?
        condition3 = False  # Did stage 2 complete successfully?
        condition4 = False  # Is a dataset.lock file present?

        # Loop over all subdirs not in active_reductions
        if subdir.split('/')[-2] not in active_reductions.keys():

            # If the subdir does not exist or does not have a ref/ subdirectory, skip it
            # If the path exists then this sets ans = 0
            ans = check_paths(subdir)

            if ans in (1, 2):
                continue

            if (path.exists(path.join(subdir, 'stage2.log'))):
                condition1 = True

            if (path.exists(path.join(subdir, 'stage3.log')) or path.exists(
                    path.join(subdir, 'reference_astrometry.log'))):
                condition1 = False

            # Check for fits images in ref/
            if (path.exists(path.join(subdir, 'ref'))):
                ref_img = glob(path.join(subdir, 'ref', '*.fits'))
                if len(ref_img) != 0:
                    condition2 = True

            # Check whether stage 2 completed successfully
            stage2_log = path.join(subdir, 'stage2.log')
            if path.exists(stage2_log):
                last_line_stage2 = read_last_line(stage2_log)
                if 'Processing complete' in last_line_stage2:
                    condition3 = True

            # Check if dataset.lock is present
            if (path.exists(path.join(subdir, 'dataset.lock'))):
                condition4 = True

            # Check conditions and apply the prescribed remedy
            if condition1 and not condition2 and condition3 and condition4:
                remove(path.join(subdir, 'dataset.lock'))
                log.info('Removed lock file from %s' % str(subdir))

                if verbose:
                    print('Removed lock file from %s' % str(subdir))

            else:
                log.info('Did not remove lock file from %s' % str(subdir))

                if verbose:
                    print('Did not remove lock file from %s' % str(subdir))

    logs.close_log(log)
    return 1
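
# A minimal sketch of the read_last_line helper used above; hypothetical, the
# real implementation may differ.
def read_last_line(file_path):
    """Return the last non-empty line of a text file, or '' if none exists."""
    last_line = ''
    with open(file_path, 'r') as f:
        for line in f:
            if line.strip():
                last_line = line.strip()
    return last_line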
def run_stage2(setup):
    """Main driver function to run stage 2: reference selection.

    This stage processes the metadata file, looks for the output of
    stage 0 and stage 1, and checks whether a reference file already
    exists.

    It creates a reference frame based on the selection criteria
    defined in the configuration. If no such configuration exists, it
    falls back to a standard configuration.

    If stage 1 has failed to produce output, it selects a reference
    based on header information.

    It always re-runs when called, since it is a lightweight function.
    """

    stage2_version = 'stage2 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage2', version=stage2_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    reduction_metadata = metadata.MetaData()

    # Load all metadata
    try:
        reduction_metadata.load_all_metadata(
            metadata_directory=setup.red_dir,
            metadata_name='pyDANDIA_metadata.fits')

        # Check data inventory on metadata
        log.info('stage2 has loaded the reduction metadata')
    except Exception as estr:
        log.info('Could not load metadata!' + repr(estr))
        status = 'FAILED'
        report = 'Loading metadata failed:' + repr(estr)
        return status, report

    try:
        n_images = len(reduction_metadata.images_stats)
    except AttributeError:
        log.info('stage2: data inventory missing.')
        status = 'FAILED'
        report = 'Data inventory (stage1) missing.'
        logs.close_log(log)
        return status, report

    # All parameters are part of metadata

    table_structure = [['IMAGE_NAME', 'MOON_STATUS', 'RANKING_KEY'],
                       ['S100', 'S100', 'float'], ['degree', None, None]]

    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    reduction_metadata.create_a_new_layer(layer_name='reference_inventory',
                                          data_structure=table_structure,
                                          data_columns=None)

    log.info('Create reference frame inventory table in metadata')

    # Iterate over images that are in the stage inventory

    reference_ranking = []

    fwhm_max = 0.
    for stats_entry in reduction_metadata.images_stats[1]:
        if float(stats_entry['FWHM_X']) > fwhm_max:
            fwhm_max = stats_entry['FWHM_X']
        if float(stats_entry['FWHM_Y']) > fwhm_max:
            fwhm_max = stats_entry['FWHM_Y']

    # taking filenames from headers_summary (stage1 change pending)
    filename_images = reduction_metadata.images_stats[1]['IM_NAME']
    data_image_directory = reduction_metadata.data_architecture[1][
        'IMAGES_PATH'][0]
    max_adu = float(reduction_metadata.reduction_parameters[1]['MAXVAL'][0])
    psf_size = int(
        4. * float(reduction_metadata.reduction_parameters[1]['KER_RAD'][0]) *
        fwhm_max)
    empirical_psf_flag = False
    if empirical_psf_flag:

        for stats_entry in reduction_metadata.images_stats[1]:
            image_filename = stats_entry[0]
            row_idx = np.where(reduction_metadata.images_stats[1]['IM_NAME'] ==
                               image_filename)[0][0]
            moon_status = 'dark'
            # to be reactivated as soon as it is part of metadata
            if 'MOONFKEY' in reduction_metadata.headers_summary[1].keys(
            ) and 'MOONDKEY' in reduction_metadata.headers_summary[1].keys():
                moon_status = moon_brightness_header(
                    reduction_metadata.headers_summary[1], row_idx)

            fwhm_arcsec = (float(stats_entry['FWHM_X'])**2 + float(
                stats_entry['FWHM_Y'])**2)**0.5 * float(
                    reduction_metadata.reduction_parameters[1]['PIX_SCALE'])
            # extract data inventory row for image and calculate sorting key
            # if a sufficient number of stars (>34) was detected at stage 1
            if int(stats_entry['NSTARS']) > 34 and fwhm_arcsec < 3. and (
                    'bright' not in moon_status):
                hdulist = fits.open(os.path.join(data_image_directory,
                                                 image_filename),
                                    memmap=True)
                image = hdulist[0].data
                ranking_key = empirical_psf_simple.empirical_snr_subframe(
                    image, psf_size, max_adu)
                hdulist.close()
                reference_ranking.append([image_filename, ranking_key])
                entry = [image_filename, moon_status, ranking_key]
                reduction_metadata.add_row_to_layer(
                    key_layer='reference_inventory', new_row=entry)

    else:
        for stats_entry in reduction_metadata.images_stats[1]:
            image_filename = stats_entry[0]
            row_idx = np.where(reduction_metadata.images_stats[1]['IM_NAME'] ==
                               image_filename)[0][0]
            moon_status = 'dark'
            # to be reactivated as soon as it is part of metadata
            if 'MOONFKEY' in reduction_metadata.headers_summary[1].keys(
            ) and 'MOONDKEY' in reduction_metadata.headers_summary[1].keys():
                moon_status = moon_brightness_header(
                    reduction_metadata.headers_summary[1], row_idx)

            fwhm_arcsec = (float(stats_entry['FWHM_X'])**2 + float(
                stats_entry['FWHM_Y'])**2)**0.5 * float(
                    reduction_metadata.reduction_parameters[1]['PIX_SCALE'])
            # extract data inventory row for image and calculate sorting key
            # if a sufficient number of stars (>34) was detected at stage 1
            if int(stats_entry['NSTARS']) > 34 and fwhm_arcsec < 3. and (
                    'bright' not in moon_status):
                ranking_key = add_stage1_rank(reduction_metadata, stats_entry)
                reference_ranking.append([image_filename, ranking_key])
                entry = [image_filename, moon_status, ranking_key]
                reduction_metadata.add_row_to_layer(
                    key_layer='reference_inventory', new_row=entry)

    # Relax the criteria if no image passed the initial cuts
    if reference_ranking == []:
        for stats_entry in reduction_metadata.images_stats[1]:
            image_filename = stats_entry[0]
            row_idx = np.where(reduction_metadata.images_stats[1]['IM_NAME'] ==
                               image_filename)[0][0]
            moon_status = 'dark'
            # to be reactivated as soon as it is part of metadata
            if 'MOONFKEY' in reduction_metadata.headers_summary[1].keys(
            ) and 'MOONDKEY' in reduction_metadata.headers_summary[1].keys():
                moon_status = moon_brightness_header(
                    reduction_metadata.headers_summary[1], row_idx)

            fwhm_arcsec = (float(stats_entry['FWHM_X'])**2 + float(
                stats_entry['FWHM_Y'])**2)**0.5 * float(
                    reduction_metadata.reduction_parameters[1]['PIX_SCALE'])
            # extract data inventory row for image and calculate sorting key
            if int(stats_entry['NSTARS']) > 20. and fwhm_arcsec < 3.:
                ranking_key = add_stage1_rank(reduction_metadata, stats_entry)
                reference_ranking.append([image_filename, ranking_key])
                entry = [image_filename, moon_status, ranking_key]
                reduction_metadata.add_row_to_layer(
                    key_layer='reference_inventory', new_row=entry)

    # Save the updated layer to the metadata file
    reduction_metadata.save_a_layer_to_file(
        metadata_directory=setup.red_dir,
        metadata_name='pyDANDIA_metadata.fits',
        key_layer='reference_inventory')

    if reference_ranking != []:
        best_image = sorted(reference_ranking, key=itemgetter(1))[-1]
        ref_directory_path = os.path.join(setup.red_dir, 'ref')
        if not os.path.exists(ref_directory_path):
            os.mkdir(ref_directory_path)

        ref_img_path = os.path.join(
            str(reduction_metadata.data_architecture[1]['IMAGES_PATH'][0]),
            best_image[0])

        print('New reference ' + best_image[0] + ' in ' + ref_img_path)

        try:
            copyfile(
                os.path.join(
                    reduction_metadata.data_architecture[1]['IMAGES_PATH'][0],
                    best_image[0]),
                os.path.join(ref_directory_path, best_image[0]))
        except OSError:
            print('copy ref failed: ', best_image[0])

        if 'REF_PATH' not in reduction_metadata.data_architecture[1].keys():
            reduction_metadata.add_column_to_layer('data_architecture',
                                                   'REF_PATH',
                                                   [ref_directory_path],
                                                   new_column_format=None,
                                                   new_column_unit=None)
        else:
            reduction_metadata.update_a_cell_to_layer('data_architecture', 0,
                                                      'REF_PATH',
                                                      ref_directory_path)
        if 'REF_IMAGE' not in reduction_metadata.data_architecture[1].keys():
            reduction_metadata.add_column_to_layer(
                'data_architecture',
                'REF_IMAGE', [os.path.basename(ref_img_path)],
                new_column_format=None,
                new_column_unit=None)
        else:
            reduction_metadata.update_a_cell_to_layer(
                'data_architecture', 0, 'REF_IMAGE',
                os.path.basename(ref_img_path))
        # Update the REDUCTION_STATUS table in metadata for stage 2

        reduction_metadata.update_reduction_metadata_reduction_status(
            all_images, stage_number=2, status=1, log=log)
        reduction_metadata.save_updated_metadata(
            metadata_directory=setup.red_dir,
            metadata_name='pyDANDIA_metadata.fits')

        status = 'OK'
        report = 'Completed successfully'
        log.info('Updating metadata with info on new images...')
        logs.close_log(log)

        return status, report

    else:
        status = 'FAILED'
        report = 'No suitable image found.'

        log.info('No reference image found...')
        logs.close_log(log)

        return status, report
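
# A hypothetical sketch of moon_brightness_header, which classifies an image
# from the MOONFKEY (illuminated fraction) and MOONDKEY (lunar distance)
# header values referenced above; the thresholds below are illustrative
# assumptions, not the real pyDANDIA values.
def moon_brightness_header(headers_summary, row_idx):
    moon_fraction = float(headers_summary['MOONFKEY'][row_idx])
    moon_distance = float(headers_summary['MOONDKEY'][row_idx])
    if moon_fraction > 0.8 and moon_distance < 30.0:
        return 'bright'
    return 'dark'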
def run_stage5(setup):
    """Main driver function to run stage 5: kernel_solution
    This stage finds the kernel solution and (optionally) subtracts the model
    image
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], stage5 status, report, 
     metadata file
    :rtype: array_like
    """

    stage5_version = 'stage5 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage5', version=stage5_version)
    log.info('Setup:\n' + setup.summary() + '\n')
    try:
        from umatrix_routine import umatrix_construction, umatrix_bvector_construction, bvector_construction

    except ImportError:
        log.info(
            'Uncompiled cython code, please run setup.py: e.g.\n python setup.py build_ext --inplace'
        )
        status = 'KO'
        report = 'Uncompiled cython code, please run setup.py: e.g.\n python setup.py build_ext --inplace'
        return status, report

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir,
                                         'pyDANDIA_metadata.fits')

    #determine kernel size based on maximum FWHM
    fwhm_max = 0.
    shift_max = 0
    for stats_entry in reduction_metadata.images_stats[1]:
        if float(stats_entry['FWHM_X']) > fwhm_max:
            fwhm_max = stats_entry['FWHM_X']
        if float(stats_entry['FWHM_Y']) > fwhm_max:
            fwhm_max = stats_entry['FWHM_Y']
        if abs(float(stats_entry['SHIFT_X'])) > shift_max:
            shift_max = abs(float(stats_entry['SHIFT_X']))
        if abs(float(stats_entry['SHIFT_Y'])) > shift_max:
            shift_max = abs(float(stats_entry['SHIFT_Y']))
    maxshift = int(shift_max) + 2
    # assume a small-format image (500x500 pixels or smaller)
    large_format_image = False

    sigma_max = fwhm_max / (2. * (2. * np.log(2.))**0.5)
    # Factor 4 corresponds to the radius of 2*FWHM used in the old pipeline
    kernel_size = int(
        4. * float(reduction_metadata.reduction_parameters[1]['KER_RAD'][0]) *
        fwhm_max)
    # ensure an odd kernel size
    if kernel_size and kernel_size % 2 == 0:
        kernel_size = kernel_size + 1
    # find the images that need to be processed
    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=5, rerun_all=None, log=log)

    kernel_directory_path = os.path.join(setup.red_dir, 'kernel')
    diffim_directory_path = os.path.join(setup.red_dir, 'diffim')
    if not os.path.exists(kernel_directory_path):
        os.mkdir(kernel_directory_path)
    if not os.path.exists(diffim_directory_path):
        os.mkdir(diffim_directory_path)
    reduction_metadata.update_column_to_layer('data_architecture',
                                              'KERNEL_PATH',
                                              kernel_directory_path)
    # difference images are written for verbosity level > 0
    reduction_metadata.update_column_to_layer('data_architecture',
                                              'DIFFIM_PATH',
                                              diffim_directory_path)
    data_image_directory = reduction_metadata.data_architecture[1][
        'IMAGES_PATH'][0]
    ref_directory_path = '.'
    #For a quick image subtraction, pre-calculate a sufficiently large u_matrix
    #based on the largest FWHM and store it to disk -> needs config switch

    try:
        reference_image_name = str(
            reduction_metadata.data_architecture[1]['REF_IMAGE'][0])
        reference_image_directory = str(
            reduction_metadata.data_architecture[1]['REF_PATH'][0])
        max_adu = 0.3 * float(
            reduction_metadata.reduction_parameters[1]['MAXVAL'][0])
        ref_row_index = np.where(
            reduction_metadata.images_stats[1]['IM_NAME'] == str(
                reduction_metadata.data_architecture[1]['REF_IMAGE'][0]))[0][0]
        ref_fwhm_x = reduction_metadata.images_stats[1][ref_row_index][
            'FWHM_X']
        ref_fwhm_y = reduction_metadata.images_stats[1][ref_row_index][
            'FWHM_Y']
        ref_sigma_x = ref_fwhm_x / (2. * (2. * np.log(2.))**0.5)
        ref_sigma_y = ref_fwhm_y / (2. * (2. * np.log(2.))**0.5)
        ref_stats = [ref_fwhm_x, ref_fwhm_y, ref_sigma_x, ref_sigma_y]
        logs.ifverbose(log, setup,
                       'Using reference image:' + reference_image_name)
    except Exception as e:
        logs.ifverbose(log, setup, 'Reference/Images ! Abort stage5' + str(e))
        status = 'KO'
        report = 'No reference image found!'
        return status, report, reduction_metadata

    if not ('SHIFT_X' in reduction_metadata.images_stats[1].keys() and
            'SHIFT_Y' in reduction_metadata.images_stats[1].keys()):
        logs.ifverbose(log, setup, 'No xshift! run stage4 ! Abort stage5')
        status = 'KO'
        report = 'No alignment data found!'
        return status, report, reduction_metadata

    if not large_format_image:
        subtract_small_format_image(new_images,
                                    reference_image_name,
                                    reference_image_directory,
                                    reduction_metadata,
                                    setup,
                                    data_image_directory,
                                    kernel_size,
                                    max_adu,
                                    ref_stats,
                                    maxshift,
                                    kernel_directory_path,
                                    diffim_directory_path,
                                    log=log)
    #append some metric for the kernel, perhaps its scale factor...
    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=5, status=1, log=log)
    logs.close_log(log)
    status = 'OK'
    report = 'Completed successfully'

    return status, report, reduction_metadata
def run_stage4(setup):
    """Main driver function to run stage 4: image alignement.
    This stage align the images to the reference frame!
    :param object setup : an instance of the ReductionSetup class. See reduction_control.py

    :return: [status, report, reduction_metadata], the stage4 status, the report, the metadata file
    :rtype: array_like

    """

    stage4_version = 'stage4 v0.1'

    log = logs.start_stage_log(setup.red_dir, 'stage4', version=stage4_version)
    log.info('Setup:\n' + setup.summary() + '\n')

    # find the metadata
    reduction_metadata = metadata.MetaData()
    reduction_metadata.load_all_metadata(setup.red_dir,
                                         'pyDANDIA_metadata.fits')

    # find the images that need to be processed
    all_images = reduction_metadata.find_all_images(setup,
                                                    reduction_metadata,
                                                    os.path.join(
                                                        setup.red_dir, 'data'),
                                                    log=log)

    new_images = reduction_metadata.find_images_need_to_be_process(
        setup, all_images, stage_number=4, rerun_all=None, log=log)

    if len(new_images) > 0:

        # find the reference image
        try:
            reference_image_name = reduction_metadata.data_architecture[1][
                'REF_IMAGE'].data[0]
            reference_image_directory = reduction_metadata.data_architecture[
                1]['REF_PATH'].data[0]
            reference_image = open_an_image(setup,
                                            reference_image_directory,
                                            reference_image_name,
                                            image_index=0,
                                            log=None)
            logs.ifverbose(
                log, setup,
                'I found the reference frame:' + reference_image_name)
        except KeyError:
            logs.ifverbose(log, setup,
                           'I can not find any reference image! Abort stage4')

            status = 'KO'
            report = 'No reference frame found!'

            return status, report

        data = []
        images_directory = reduction_metadata.data_architecture[1][
            'IMAGES_PATH'].data[0]
        for new_image in new_images:
            target_image = open_an_image(setup,
                                         images_directory,
                                         new_image,
                                         image_index=0,
                                         log=None)

            try:
                x_new_center, y_new_center, x_shift, y_shift = find_x_y_shifts_from_the_reference_image(
                    setup,
                    reference_image,
                    target_image,
                    edgefraction=0.5,
                    log=None)

                data.append([new_image, x_shift, y_shift])
                logs.ifverbose(
                    log, setup,
                    'I found the image translation to the reference for frame:'
                    + new_image)

            except Exception:

                logs.ifverbose(
                    log, setup,
                    'I can not find the image translation to the reference for frame:'
                    + new_image + '. Abort stage4!')

                status = 'KO'
                report = 'No shift found for image: ' + new_image + ' !'

                return status, report

        if ('SHIFT_X' in reduction_metadata.images_stats[1].keys()) and (
                'SHIFT_Y' in reduction_metadata.images_stats[1].keys()):

            for index in range(len(data)):
                target_image = data[index][0]
                x_shift = data[index][1]
                y_shift = data[index][2]
                row_index = np.where(reduction_metadata.images_stats[1]
                                     ['IM_NAME'].data == target_image)[0][0]
                reduction_metadata.update_a_cell_to_layer(
                    'images_stats', row_index, 'SHIFT_X', x_shift)
                reduction_metadata.update_a_cell_to_layer(
                    'images_stats', row_index, 'SHIFT_Y', y_shift)
                logs.ifverbose(log, setup,
                               'Updated metadata for image: ' + target_image)
        else:
            logs.ifverbose(log, setup,
                           'I have to construct SHIFT_X and SHIFT_Y columns')

            sorted_data = np.copy(data)

            for index in range(len(data)):
                target_image = data[index][0]

                row_index = np.where(reduction_metadata.images_stats[1]
                                     ['IM_NAME'].data == target_image)[0][0]

                sorted_data[row_index] = data[index]

            column_format = 'int'
            column_unit = 'pix'
            reduction_metadata.add_column_to_layer(
                'images_stats',
                'SHIFT_X',
                sorted_data[:, 1],
                new_column_format=column_format,
                new_column_unit=column_unit)

            reduction_metadata.add_column_to_layer(
                'images_stats',
                'SHIFT_Y',
                sorted_data[:, 2],
                new_column_format=column_format,
                new_column_unit=column_unit)

    reduction_metadata.update_reduction_metadata_reduction_status(
        new_images, stage_number=4, status=1, log=log)

    reduction_metadata.save_updated_metadata(
        reduction_metadata.data_architecture[1]['OUTPUT_DIRECTORY'][0],
        reduction_metadata.data_architecture[1]['METADATA_NAME'][0],
        log=log)

    logs.close_log(log)

    status = 'OK'
    report = 'Completed successfully'

    return status, report
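
# A hypothetical sketch of the shift measurement performed by
# find_x_y_shifts_from_the_reference_image, using FFT-based cross-correlation
# on the central fraction of the frames; the real pyDANDIA implementation may
# differ.
import numpy as np

def find_x_y_shifts(reference_image, target_image, edgefraction=0.5):
    ny, nx = reference_image.shape
    # use only the central fraction of the frame to avoid edge artefacts
    dy, dx = int(ny * edgefraction / 2.), int(nx * edgefraction / 2.)
    ref = reference_image[ny // 2 - dy:ny // 2 + dy, nx // 2 - dx:nx // 2 + dx]
    tgt = target_image[ny // 2 - dy:ny // 2 + dy, nx // 2 - dx:nx // 2 + dx]
    # cross-correlate via FFT and locate the correlation peak
    xcorr = np.fft.ifft2(np.fft.fft2(ref) * np.conj(np.fft.fft2(tgt))).real
    peak_y, peak_x = np.unravel_index(np.argmax(xcorr), xcorr.shape)
    # wrap peak coordinates into signed shifts in the range [-N/2, N/2)
    y_shift = peak_y if peak_y < ref.shape[0] / 2. else peak_y - ref.shape[0]
    x_shift = peak_x if peak_x < ref.shape[1] / 2. else peak_x - ref.shape[1]
    return x_shift, y_shift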