Example #1

import os

# NOTE: TEST_DATA_DIR, TEMPORARY_DIR, and the temporary_directory() helper are
# defined elsewhere in the original test module.
from mirage.yaml import generate_observationlist

def test_observation_list_generation_minimal():

    # Generate the temporary output directory (helper defined elsewhere in
    # the original test module)
    temporary_directory()

    instrument = 'NIRISS'

    catalogs = {}
    if instrument == 'NIRISS':
        apt_dir = os.path.join(TEST_DATA_DIR, instrument)
        apt_file_seed = '1087_minimal'
        source_list_file_name = os.path.join(apt_dir,
                                             'niriss_point_sources.list')
        catalogs[instrument.lower()] = source_list_file_name

    # Write observationlist.yaml
    observation_list_file = os.path.join(
        TEMPORARY_DIR, '{}_observation_list.yaml'.format(instrument.lower()))
    apt_file_xml = os.path.join(apt_dir, '{}.xml'.format(apt_file_seed))
    generate_observationlist.get_observation_dict(apt_file_xml,
                                                  observation_list_file,
                                                  catalogs=catalogs)

    assert os.path.isfile(observation_list_file)

Example #2

import os

import numpy as np
import pysiaf

# NOTE: get_observation_dict lives in mirage.yaml.generate_observationlist.
# calc_frame_time and sci_subarray_corners are assumed to come from
# mirage.utils.utils and mirage.utils.siaf_interface, respectively. The
# remaining helpers (calculate_total_files, get_data, uncal_header_keywords,
# uncal_table_info, equalize_file_lists, find_fastaxis, adjust_exptype) and
# the FLOAT_KEYWORDS / FILTER_KEYWORDS constants are defined elsewhere in
# the original script.
from mirage.yaml.generate_observationlist import get_observation_dict

def validate(xml_file, output_dir, gseg_uncal_files):
    """MAIN FUNCTION"""
    pointing_file = xml_file.replace('.xml', '.pointing')
    gseg_rate_files = [f.replace('uncal', 'rate') for f in gseg_uncal_files]

    catalogs = {'nircam': {'sw': 'nothing.cat', 'lw': 'nothing.cat'}}

    observation_list_file = os.path.join(output_dir, 'observation_list.yaml')
    apt_xml_dict = get_observation_dict(xml_file,
                                        observation_list_file,
                                        catalogs,
                                        verbose=True)
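
    # get_observation_dict writes observation_list_file to disk and returns
    # the expanded exposure table as a dict of per-exposure lists keyed by
    # parameter name (e.g. 'ObservationID', 'ProposalID').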

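    # Normalize observation IDs to zero-padded three-digit strings
    # (e.g. '1' -> '001') so they match the 'jw<proposal><observation>...'
    # file name prefixes used below.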
    observation_list = set(apt_xml_dict['ObservationID'])
    int_obs = sorted([int(o) for o in observation_list])
    str_obs_list = [str(o).zfill(3) for o in int_obs]

    for observation_to_check in str_obs_list:
        print('')
        print('')
        print('OBSERVATION: {}'.format(observation_to_check))
        print('')

        good = np.where(
            np.array(apt_xml_dict['ObservationID']) == observation_to_check)

        try:
            total_expected_files = calculate_total_files(
                apt_xml_dict, good[0][0])
            print('Total number of expected files: {}'.format(
                total_expected_files))
        except IndexError:
            print("No files found.")
            continue

        # The complication here is that the table created by Mirage does not have a filename
        # attached to each entry. So we need a way to connect an actual filename
        # to each entry
        subdir_start = 'jw' + apt_xml_dict['ProposalID'][
            good[0][0]] + observation_to_check.zfill(3)
        matching_uncal_files = sorted([
            filename for filename in gseg_uncal_files
            if subdir_start in filename
        ])
        matching_rate_files = sorted([
            filename for filename in gseg_rate_files
            if subdir_start in filename
        ])
        print('Found uncal files:')
        for filename in matching_uncal_files:
            print(filename)
        print('')
        print('Found rate files:')
        for filename in matching_rate_files:
            print(filename)
        print('')

        # Check to see if any files are missing
        if len(matching_uncal_files) != total_expected_files:
            print(
                "WARNING: Missing uncal files for observation {}. Expected {} files, found {}."
                .format(observation_to_check, total_expected_files,
                        len(matching_uncal_files)))
        if len(matching_rate_files) != total_expected_files:
            print(
                "WARNING: Missing rate files for observation {}. Expected {} files, found {}."
                .format(observation_to_check, total_expected_files,
                        len(matching_rate_files)))

        # matching_uncal_files and matching_rate_files may have different
        # lengths; they must be equalized before they can be zipped together
        # below.
        if len(matching_uncal_files) != len(matching_rate_files):
            (matching_uncal_files, matching_rate_files) = equalize_file_lists(
                matching_uncal_files, matching_rate_files)
            print('Equalized file lists (should have a 1:1 correspondence):')
            for idx in range(len(matching_uncal_files)):
                print(matching_uncal_files[idx], matching_rate_files[idx])
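
        # equalize_file_lists is defined elsewhere in the original script. A
        # plausible sketch (an assumption, not the original implementation)
        # pads the shorter list with None so that zip() below still pairs
        # every file, which is also why the loop guards against None entries:
        #
        #     def equalize_file_lists(uncal_list, rate_list):
        #         n = max(len(uncal_list), len(rate_list))
        #         uncal_list = list(uncal_list) + [None] * (n - len(uncal_list))
        #         rate_list = list(rate_list) + [None] * (n - len(rate_list))
        #         return uncal_list, rate_list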

        # Create a SIAF instance for the subarray calculations below (it is
        # re-created for every observation and could be hoisted out of the
        # loop)
        siaf = pysiaf.Siaf('NIRCam')

        for uncal, rate in zip(matching_uncal_files, matching_rate_files):
            good_uncal = uncal is not None
            good_rate = rate is not None

            if good_uncal:
                print("Checking {}".format(os.path.split(uncal)[1]))
                print('-----------------------------------------------')
            elif good_rate:
                print("Checking {}".format(os.path.split(rate)[1]))
                print('-----------------------------------------------')

            if good_uncal:
                data, header, sci_header = get_data(uncal)
                detector_from_filename = uncal.split('_')[-2].upper()
                header_detector = header['DETECTOR']
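                # Header DETECTOR values use 'LONG' (e.g. NRCALONG) while
                # SIAF aperture names use '5' (e.g. NRCA5) for the
                # long-wavelength detectors, so convert before the
                # aperture-name check below.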
                if 'LONG' in header_detector:
                    header_detector = header_detector.replace('LONG', '5')
                if header_detector not in header['APERNAME']:
                    print((
                        "WARNING: Detector name and aperture name in file header appear to be incompatible: {}, {}"
                        .format(header['DETECTOR'], header['APERNAME'])))
                    print("Detector listed in filename: {}".format(
                        detector_from_filename))
                    print(
                        'If the aperture is incorrect then the calculated subarray location from pysiaf will also be incorrect.'
                    )
                data_shape = data.shape

                # Get info from header to be compared
                header_vals = uncal_header_keywords(header)

                # Get matching data from the exposure table
                table_vals = uncal_table_info(apt_xml_dict, good[0][0])

                # Make some adjustments to the exposure table info

                # Calculate the exposure time
                aperture = header['APERNAME']  # could also try PPS_APER

                print('Aperture listed in header is: {}'.format(aperture))

                num_amps = 1
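                # NOTE: num_amps is hard-coded to 1 above. NIRCam full-frame
                # exposures are normally read out through 4 amplifiers, so
                # the frame time computed below only applies to single-amp
                # subarray readouts.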
                frametime = calc_frame_time('NIRCam', aperture, data_shape[-1],
                                            data_shape[-2], num_amps)
                table_vals['EFFEXPTM'] = frametime * int(table_vals['NGROUPS'])

                # NAXIS
                table_vals['NAXIS'] = len(data.shape)
                header_vals['NAXIS'] = sci_header['NAXIS']

                # Use pysiaf to calculate subarray locations
                try:
                    xc, yc = sci_subarray_corners('NIRCam',
                                                  aperture,
                                                  siaf=siaf)
                    table_vals['SUBSTRT1'] = xc[0] + 1
                    table_vals['SUBSTRT2'] = yc[0] + 1
                    table_vals['SUBSIZE1'] = siaf[aperture].XSciSize
                    table_vals['SUBSIZE2'] = siaf[aperture].YSciSize
                except KeyError:
                    print(
                        "ERROR: Aperture {} is not a valid aperture in pysiaf".
                        format(aperture))
                    xc = [-2, -2]
                    yc = [-2, -2]
                    table_vals['SUBSTRT1'] = xc[0] + 1
                    table_vals['SUBSTRT2'] = yc[0] + 1
                    table_vals['SUBSIZE1'] = 9999
                    table_vals['SUBSIZE2'] = 9999
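                    # The -2 offsets (giving SUBSTRT = -1) and the 9999 sizes
                    # act as obviously-invalid sentinel values so the keyword
                    # comparison below flags the unrecognized aperture.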

                # Create FASTAXIS and SLOWAXIS values based on the detector name
                fast, slow = find_fastaxis(header_vals['DETECTOR'])
                table_vals['FASTAXIS'] = fast
                table_vals['SLOWAXIS'] = slow

                # Remove whitespace from observing template in file
                header_vals['TEMPLATE'] = header_vals['TEMPLATE'].replace(
                    ' ', '').lower()
                table_vals['TEMPLATE'] = table_vals['TEMPLATE'].lower()

                # Adjust prime/parallel boolean from table to be a string
                if not table_vals['EXPRIPAR']:
                    table_vals['EXPRIPAR'] = 'PRIME'
                else:
                    table_vals['EXPRIPAR'] = 'PARALLEL'

                # Change exposure type from table to match up with
                # types of strings in the file
                table_vals['EXP_TYPE'] = adjust_exptype(table_vals['EXP_TYPE'])

                # Set the DETECTOR field to be identical. This info is not in the
                # exposure table, so we can't actually check it
                table_vals['DETECTOR'] = header_vals['DETECTOR']

                # Compare the actual data shape to the shape given in the header
                header_shape = (header_vals['NINTS'], header_vals['NGROUPS'],
                                header_vals['SUBSIZE2'],
                                header_vals['SUBSIZE1'])
                if header_shape != data_shape:
                    print(
                        "WARNING: Shape of data in the file does not match that specified in the header."
                    )
                    print('Data shape: {}'.format(data_shape))
                    print('Header shape: {}'.format(header_shape))

                # Now compare the data in the dictionary from the file versus that
                # from the exposure table created from the APT file
                err = False
                for key in header_vals:
                    if header_vals[key] != table_vals[key]:
                        if key not in FLOAT_KEYWORDS and key not in FILTER_KEYWORDS:
                            err = True
                            print(
                                'MISMATCH: {}, in exp table: {}, in file: {}'.
                                format(key, table_vals[key], header_vals[key]))
                        elif key in FLOAT_KEYWORDS:
                            if not np.isclose(header_vals[key],
                                              table_vals[key],
                                              rtol=0.01,
                                              atol=0.):
                                err = True
                                print(
                                    'MISMATCH: {}, in exp table: {}, in file: {}'
                                    .format(key, table_vals[key],
                                            header_vals[key]))

                        if key in ['LONGFILTER', 'LONGPUPIL'
                                   ] and 'LONG' in header_vals['DETECTOR']:
                            err = True
                            print(
                                'MISMATCH: {}, in exp table: {}, in file: {}'.
                                format(key, table_vals[key], header_vals[key]))
                        if key in ['SHORTFILTER', 'SHORTPUPIL'
                                   ] and 'LONG' not in header_vals['DETECTOR']:
                            err = True
                            print(
                                'MISMATCH: {}, in exp table: {}, in file: {}'.
                                format(key, table_vals[key], header_vals[key]))

                if not err:
                    print('No inconsistencies. File header info correct.')

            print('')
            print('')
Example #3
import os

from mirage.yaml import generate_observationlist, yaml_generator

# NOTE: home_dir and ami_example_dir are defined earlier in the original
# (truncated) script. The xml_name assignment is an assumption, inferred
# from the .pointing file name below.
xml_name = os.path.join(ami_example_dir,
                        'input_files/793_mirage_example.xml')
pointing_name = os.path.join(ami_example_dir,
                             'input_files/793_mirage_example.pointing')

output_directory = os.path.join(home_dir, 'ami_mirage_simulation_example')
simdata_output_directory = output_directory

# AB Dor is in field 19 and HD37093 is in field 20 in Kevin's targets.xlsx file
catalogues = {
    'niriss':
    os.path.join(ami_example_dir, 'stars_field19_20_combined_allfilters.list')
}
parameter_defaults = {'PAV3': 275., 'DATE': '2020-09-20'}

observation_file = os.path.join(output_directory, '793_observation_list.yaml')

generate_observationlist.get_observation_dict(xml_name,
                                              observation_file,
                                              catalogs=catalogues)
yam = yaml_generator.SimInput(input_xml=xml_name,
                              pointing_file=pointing_name,
                              catalogs=catalogues,
                              observation_list_file=observation_file,
                              verbose=True,
                              output_dir=output_directory,
                              simdata_output_dir=simdata_output_directory,
                              use_JWST_pipeline=True,
                              offline=False,
                              parameter_defaults=parameter_defaults)

datatype = 'linear, raw'
yam.datatype = datatype
yam.create_inputs()
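
The yaml files written by create_inputs() are the per-exposure inputs to the simulator itself. A minimal follow-up sketch, assuming the documented imaging_simulator API and a hypothetical yaml file name:

from mirage import imaging_simulator

sim = imaging_simulator.ImgSim()
# hypothetical name; use one of the yaml files written to output_directory
sim.paramfile = 'jw00793001001_01101_00001_nis.yaml'
sim.create()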
Example #4

import glob
import os
import shutil

# NOTE: etree is assumed to be lxml.etree (the code would work equally with
# xml.etree.ElementTree), and TESTS_DIR, APT_NAMESPACE, and PROPOSAL_ID are
# module-level constants defined elsewhere in the original test module.
from lxml import etree

from mirage.yaml import generate_observationlist, yaml_generator

def RunAllAPTTemplates(instrument):
    '''Parse the given APT files and create a set of .yamls for a given
    instrument
    '''
    # Define .pointing and .xml file locations
    pointing_file = os.path.join(TESTS_DIR, 'test_data', instrument,
                                 instrument + 'Test.pointing')
    xml_file = os.path.join(TESTS_DIR, 'test_data', instrument,
                            instrument + 'Test.xml')

    # Open XML file, get element tree of the APT proposal to determine how
    # many observations there are
    with open(xml_file) as f:
        tree = etree.parse(f)
    observation_data = tree.find(APT_NAMESPACE + 'DataRequests')
    obs_results = observation_data.findall('.//' + APT_NAMESPACE +
                                           'Observation')
    n_obs = len(obs_results)

    # Locate catalogs for target(s) (one catalog per observation and channel)
    sw_cats = [
        os.path.join(TESTS_DIR, 'test_data',
                     '2MASS_RA273.09deg_Dec65.60deg.list')
    ] * n_obs
    lw_cats = [
        os.path.join(TESTS_DIR, 'test_data',
                     'WISE_RA273.09deg_Dec65.60deg.list')
    ] * n_obs
    cat_dict = {'nircam': {'lw': lw_cats, 'sw': sw_cats}}

    # Point to appropriate output directory
    out_dir = os.path.join(TESTS_DIR, 'test_data', instrument,
                           'APT_{}_out'.format(instrument))

    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    # Write observationlist.yaml
    observationlist_file = os.path.join(out_dir,
                                        instrument + '_observationlist.yaml')
    # write_observationlist.get_observation_dict(xml_file, pointing_file, observationlist_file,
    #                                  ps_cat_sw=sw_cats, ps_cat_lw=lw_cats)
    apt_xml_dict = generate_observationlist.get_observation_dict(
        xml_file, observationlist_file, cat_dict)

    # Create a series of data simulator input yaml files
    yam = yaml_generator.SimInput()
    yam.input_xml = xml_file
    yam.pointing_file = pointing_file
    yam.output_dir = out_dir
    yam.simdata_output_dir = out_dir
    yam.observation_list_file = observationlist_file
    yam.use_JWST_pipeline = False
    yam.use_linearized_darks = True
    yam.datatype = 'linear'
    yam.reffile_setup(offline=True)
    yam.set_global_definitions()
    yam.apt_xml_dict = apt_xml_dict
    yam.create_inputs()

    # Ensure that some of the expected files have been created
    assert os.path.exists(os.path.join(out_dir, 'Observation_table_for_' +
                                                instrument +
                                                'Test.xml_with_yaml_parameters.csv')), \
        'Observation table not created.'

    number_of_yaml_files = len(
        glob.glob(
            os.path.join(out_dir, 'jw{:05d}*.yaml'.format(int(PROPOSAL_ID)))))
    print('PROPOSAL_ID: {}'.format(PROPOSAL_ID))
    print('number of observations: {}'.format(n_obs))
    print('number of files written: {}'.format(number_of_yaml_files))
    assert n_obs == 17
    # assert number_of_yaml_files == 150
    assert number_of_yaml_files >= n_obs, 'Fewer yaml files created than observations'
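
A typical invocation, assuming the test-data layout described above (the instrument string must match the test_data subdirectory and file names):

RunAllAPTTemplates('NIRCam')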