def helper_cassini_mission_phase_name(**kwargs):
    metadata = kwargs['metadata']
    obs_general_row = metadata['obs_general_row']
    time1 = obs_general_row['time1']
    if time1 is None:
        return None
    for phase, start_time, stop_time in _CASSINI_PHASE_NAME_MAPPING:
        start_time_sec = julian.tai_from_iso(start_time)
        stop_time_sec = julian.tai_from_iso(stop_time)
        if start_time_sec <= time1 < stop_time_sec:
            return phase.upper()
    return None
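The helper above iterates over _CASSINI_PHASE_NAME_MAPPING, which is defined elsewhere in the importing module. A minimal sketch of the shape it assumes (the phase names and dates below are illustrative placeholders, not the real table):

# Hypothetical (phase, start, stop) tuples; the times are ISO strings that
# julian.tai_from_iso can parse.
_CASSINI_PHASE_NAME_MAPPING = [
    ('Science Cruise',   '1999-01-01T00:00:00', '2004-01-01T00:00:00'),
    ('Tour',             '2004-01-01T00:00:00', '2008-07-01T00:00:00'),
    ('Extended Mission', '2008-07-01T00:00:00', '2010-10-01T00:00:00'),
]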
def DATETIME(value, offset=0):
    """Convert the given date/time string to a year-month-day format with a
    trailing "Z". The date can be in year-month-day or year-dayofyear
    format. The time component (following a capital 'T') is unchanged. If
    the value is "UNK", then "UNK" is returned. A nonzero offset (in
    seconds) forces conversion through TAI with the offset applied."""

    if isinstance(value, float):
        return julian.ymdhms_format_from_tai(value + offset, 'T', 3, 'Z')

    if value.strip() == 'UNK':
        return 'UNK'

    (date, hms) = value.split('T')    # raises ValueError if not exactly two parts
    parts = date.split('-')
    if len(parts) == 3 and offset == 0:
        return date + 'T' + hms + ('Z' if not hms.endswith('Z') else '')

    if len(parts) == 2 and offset == 0:
        # Note that strftime does not support dates before 1900, and some
        # labels contain erroneous years. This is a workaround: parse the
        # day-of-year against a 19xx year, then splice the original century
        # and decade back in.
        day = time.strptime('19' + date[2:], '%Y-%j')
        date = parts[0][:2] + time.strftime('%Y-%m-%d', day)[2:]

        return date + 'T' + hms + ('Z' if not hms.endswith('Z') else '')

    tai = julian.tai_from_iso(value) + offset
    parts = value.split('.')
    digits = len(parts[-1]) if len(parts) > 1 else 0
    return julian.ymdhms_format_from_tai(tai, 'T', digits, 'Z')
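A few illustrative calls against the function above (the year-dayofyear case follows the strftime workaround; the offset case is reformatted through TAI):

DATETIME('UNK')                         # -> 'UNK'
DATETIME('2004-07-01T12:34:56')         # y-m-d passthrough -> '2004-07-01T12:34:56Z'
DATETIME('2004-183T12:34:56')           # y-doy             -> '2004-07-01T12:34:56Z'
DATETIME('2004-07-01T12:34:56', 60.0)   # offset in seconds, converted via TAI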
def populate_obs_mission_cassini_ert2(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']

    # EARTH_RECEIVED_STOP_TIME isn't available for COUVIS
    stop_time = index_row.get('EARTH_RECEIVED_STOP_TIME', None)
    if stop_time is None:
        return None

    try:
        ert_sec = julian.tai_from_iso(stop_time)
    except Exception as e:
        import_util.log_nonrepeating_warning(
            f'Bad earth received stop time format "{stop_time}": {e}')
        return None

    cassini_row = metadata['obs_mission_cassini_row']
    start_time_sec = cassini_row['ert1']

    if start_time_sec is not None and ert_sec < start_time_sec:
        import_util.log_warning(
            f'cassini_ert1 ({start_time_sec}) and cassini_ert2 ({ert_sec}) '
            f'are in the wrong order - setting ert2 to ert1')
        ert_sec = start_time_sec

    return ert_sec
Example #4
def populate_obs_general_HSTx_time2_OBS(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    stop_time = import_util.safe_column(index_row, 'STOP_TIME')

    if stop_time is None:
        return None

    try:
        stop_time_sec = julian.tai_from_iso(stop_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad stop time format "{stop_time}": {e}')
        return None

    general_row = metadata['obs_general_row']
    start_time_sec = general_row['time1']

    if start_time_sec is not None and stop_time_sec < start_time_sec:
        start_time = import_util.safe_column(index_row, 'START_TIME')
        import_util.log_warning(f'time1 ({start_time}) and time2 ({stop_time}) '
                                f'are in the wrong order - setting to time1')
        stop_time_sec = start_time_sec

    return stop_time_sec
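These populate_* helpers all lean on import_util.safe_column, which is not shown on this page. Judging from how it is called, it returns a column's value, or None when the column is missing or holds a placeholder. A hypothetical stand-in for experimenting with these snippets:

def safe_column(row, column_name):
    # Stand-in only: the real import_util.safe_column may handle more cases.
    value = row.get(column_name)
    if value is None or (isinstance(value, str) and
                         value.strip() in ('', 'UNK', 'N/A')):
        return None
    return value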
def populate_obs_general_GOSSI_time2(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    stop_time = import_util.safe_column(index_row, 'IMAGE_TIME')

    if stop_time is None:
        return None

    try:
        stop_time_sec = julian.tai_from_iso(stop_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad image time format "{stop_time}": {e}')
        return None

    general_row = metadata['obs_general_row']
    start_time_sec = general_row['time1']

    if start_time_sec is not None and stop_time_sec < start_time_sec:
        start_time = import_util.safe_column(index_row, 'START_TIME')
        import_util.log_warning(f'time1 ({start_time}) and time2 ({stop_time}) '
                                f'are in the wrong order - setting to time1')
        stop_time_sec = start_time_sec

    return stop_time_sec
def helper_cassini_planet_id(**kwargs):
    """Find the planet associated with an observation. This is based on the
    mission phase (as encoded in the observation time so it works with all
    instruments)."""

    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    obs_general_row = metadata['obs_general_row']

    time_sec2 = obs_general_row['time2']

    jup = julian.tai_from_iso('2000-262T00:32:38.930')
    sat = julian.tai_from_iso('2003-138T02:16:18.383')

    if time_sec2 is None or time_sec2 < jup:
        return None
    if time_sec2 < sat:
        return 'JUP'
    return 'SAT'
Example #7
def get_start_stop_times_old(lookup: Lookup) -> Dict[str, str]:
    # TODO Remove this after get_start_stop_times() is fixed.
    date_obs, time_obs, exptime = lookup.keys(["DATE-OBS", "TIME-OBS", "EXPTIME"])

    start_date_time = f"{date_obs}T{time_obs}Z"
    stop_date_time_float = julian.tai_from_iso(start_date_time) + float(exptime)
    stop_date_time = julian.iso_from_tai(stop_date_time_float, suffix="Z")

    return {
        "start_date_time": start_date_time,
        "stop_date_time": stop_date_time,
        "exposure_duration": exptime,
    }
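get_start_stop_times_old assumes a Lookup object whose keys() method returns the values of several header fields at once. A hypothetical stub satisfying that interface:

from typing import Dict, List

class Lookup:
    """Stub wrapping a FITS-like header dictionary (for illustration only)."""

    def __init__(self, header: Dict[str, str]):
        self._header = header

    def keys(self, names: List[str]) -> List[str]:
        # Values for the requested header names, in request order.
        return [self._header[name] for name in names]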
Example #8
def populate_obs_pds_HSTx_product_creation_time_OBS(**kwargs):
    metadata = kwargs['metadata']
    index_label = metadata['index_label']
    pct = index_label['PRODUCT_CREATION_TIME']

    try:
        pct_sec = julian.tai_from_iso(pct)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad product creation time format "{pct}": {e}')
        return None

    return pct_sec
def populate_obs_pds_VGISS_product_creation_time(**kwargs):
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    if supp_index_row is None:
        return None
    pct = supp_index_row['PRODUCT_CREATION_TIME']

    try:
        pct_sec = julian.tai_from_iso(pct)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad product creation time format "{pct}": {e}')
        return None

    return pct_sec
def populate_obs_pds_GOSSI_product_creation_time(**kwargs):
    # For GOSSI the PRODUCT_CREATION_TIME is provided in the volume label file,
    # not the individual observation rows
    metadata = kwargs['metadata']
    index_label = metadata['index_label']
    pct = index_label['PRODUCT_CREATION_TIME']

    try:
        pct_sec = julian.tai_from_iso(pct)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad product creation time format "{pct}": {e}')
        return None

    return pct_sec
Example #12
def INIT_SCLKS():

    global NEW_SCLK_FROM_TAI, NEW_TAI_FROM_SCLK
    global OLD_SCLK_FROM_TAI, OLD_TAI_FROM_SCLK

    if NEW_SCLK_FROM_TAI is not None: return

    tk = textkernel.from_file('cas00172.tsc')
    coeffts = np.array(tk['SCLK'][1]['COEFFICIENTS_82']).reshape(-1,3)
    sclk = (coeffts[:,0] + tk['SCLK_PARTITION_START_82']) / 256.
    tai = julian.tai_from_tdt(coeffts[:,1])

    tai_end = julian.tai_from_iso(EOM)
    rate_end = coeffts[-1,2]
    sclk_end = sclk[-1] + (tai_end - tai[-1]) / rate_end

    sclk = np.array(list(sclk) + [sclk_end])
    tai  = np.array(list(tai)  + [tai_end])
    NEW_SCLK_FROM_TAI = tabulation.Tabulation(tai, sclk)
    NEW_TAI_FROM_SCLK = tabulation.Tabulation(sclk, tai)

    tk = textkernel.from_file('cas00171.tsc')
    coeffts = np.array(tk['SCLK'][1]['COEFFICIENTS_82']).reshape(-1,3)
    sclk = (coeffts[:,0] + tk['SCLK_PARTITION_START_82']) / 256.
    tai = julian.tai_from_tdb(coeffts[:,1])

    tai_end = julian.tai_from_iso(EOM)
    rate_end = coeffts[-1,2]
    sclk_end = sclk[-1] + (tai_end - tai[-1]) / rate_end

    sclk = np.array(list(sclk) + [sclk_end])
    tai  = np.array(list(tai)  + [tai_end])
    OLD_SCLK_FROM_TAI = tabulation.Tabulation(tai, sclk)
    OLD_TAI_FROM_SCLK = tabulation.Tabulation(sclk, tai)
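After INIT_SCLKS() runs, the module-level Tabulation objects act as callable interpolators (the HSP branch of write_uvis_pds4_label below uses them exactly this way). A sketch of a round trip, assuming the .tsc kernel files are on hand:

INIT_SCLKS()
tai = julian.tai_from_iso('2008-01-01T00:00:00')
sclk = OLD_SCLK_FROM_TAI(tai)       # interpolate TAI -> SCLK (cas00171)
new_tai = NEW_TAI_FROM_SCLK(sclk)   # interpolate SCLK -> TAI (cas00172)
shift_secs = new_tai - tai          # timing shift between the two solutions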
Example #14
def populate_obs_mission_voyager_ert(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    ert_time = index_row['EARTH_RECEIVED_TIME']

    if ert_time.startswith('UNK'):
        return None

    try:
        ert_sec = julian.tai_from_iso(ert_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad earth received time format "{ert_time}": {e}')
        return None

    return ert_sec
Example #16
def populate_obs_mission_hubble_publication_date(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    pub_date = index_row['PUBLICATION_DATE']

    if pub_date is None:
        return None

    try:
        pub_date_sec = julian.tai_from_iso(pub_date)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad publication date format "{pub_date}": {e}')
        return None

    return pub_date_sec
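All of the populate_* functions on this page share one calling convention: a metadata dictionary passed by keyword, holding the rows they read from. A hypothetical invocation of the function above:

metadata = {'index_row': {'PUBLICATION_DATE': '2018-04-25'}}
pub_date_sec = populate_obs_mission_hubble_publication_date(metadata=metadata)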
Example #17
def populate_obs_general_HSTx_time1_OBS(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    start_time = import_util.safe_column(index_row, 'START_TIME')

    if start_time is None:
        return None

    try:
        start_time_sec = julian.tai_from_iso(start_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad start time format "{start_time}": {e}')
        return None

    return start_time_sec
def populate_obs_mission_cassini_ert1(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']

    # EARTH_RECEIVED_START_TIME isn't available for COUVIS
    start_time = index_row.get('EARTH_RECEIVED_START_TIME', None)
    if start_time is None:
        return None

    try:
        ert_sec = julian.tai_from_iso(start_time)
    except Exception as e:
        import_util.log_nonrepeating_warning(
            f'Bad earth received start time format "{start_time}": {e}')
        return None

    return ert_sec
def populate_obs_general_GOSSI_time1(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    stop_time = import_util.safe_column(index_row, 'IMAGE_TIME')
    exposure = import_util.safe_column(index_row, 'EXPOSURE_DURATION')

    if stop_time is None:
        return None
    if exposure is None:
        exposure = 0

    try:
        stop_time_sec = julian.tai_from_iso(stop_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad image time format "{stop_time}": {e}')
        return None

    return stop_time_sec - exposure / 1000
def populate_obs_general_GOSSI_time1_OBS(**kwargs):
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    stop_time = import_util.safe_column(index_row, 'IMAGE_TIME')
    exposure = import_util.safe_column(index_row, 'EXPOSURE_DURATION')

    if stop_time is None:
        return None
    if exposure is None:
        exposure = 0

    try:
        stop_time_sec = julian.tai_from_iso(stop_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad image time format "{stop_time}": {e}')
        return None

    return stop_time_sec - exposure / 1000
def populate_obs_general_VGISS_time1(**kwargs):
    metadata = kwargs['metadata']
    supp_index_row = metadata['supp_index_row']
    if supp_index_row is None:
        return None
    start_time = import_util.safe_column(supp_index_row, 'START_TIME')

    if start_time is None:
        return None

    try:
        start_time_sec = julian.tai_from_iso(start_time)
    except Exception as e:
        import_util.log_nonrepeating_error(
            f'Bad start time format "{start_time}": {e}')
        return None

    return start_time_sec
Example #22
profile_label = profile_index_table.info.label.as_dict()

tol_list = []
with open('Final-Cassini-TOL.txt', 'r') as tol_fp:
    tol_fp.readline()  # Skip the header line
    while True:
        line = tol_fp.readline().strip()
        if line == '':
            break
        fields = line.split('\t')
        obs_id = fields[0]
        if (not obs_id.startswith('RSS') or not obs_id.endswith('PRIME')
                or 'OCC' not in obs_id):
            continue
        start_time = julian.tai_from_iso(fields[3])
        end_time = julian.tai_from_iso(fields[6])
        tol_list.append((obs_id, start_time, end_time))

print('TOL entries:', len(tol_list))

output_fp = open(supp_index_tab_filename, 'w')

for row in profile_rows:
    filespec = row['FILE_SPECIFICATION_NAME']
    data_label_filename = vol_root + '/' + filespec
    data_label = pdstable.PdsTableInfo(data_label_filename).label.as_dict()

    lowest_detectable_opacity = data_label['LOWEST_DETECTABLE_OPACITY']
# 1980-11-12T05:15:45.520  South
# 1980-11-13T04:19:30.640  North
# 1981-08-26T04:18:21.080  South
# 1985-11-06T17:22:30.040  North
# 1986-01-24T17:10:13.320  South
# We don't cache these dates because they're only computed once, and calling
# julian.tai_from_iso directly allows us to do an easy vectorization of a list
# of dates.
THRESHOLD_START_TIME_VG_AT_NORTH = julian.tai_from_iso([
    '1980-11-12T05:15:45.520', '1980-11-13T04:19:30.640',
    '1981-08-26T04:16:45.080', '1985-11-06T17:22:30.040',
    '1986-01-24T17:10:13.320'
])

# This class handles everything that the instruments VGISS, VGPPS, VGRSS,
# and VGUVS have in common in the VG_28xx reflection/occultation profile
# volumes.


class ObsInstrumentVG28xx(ObsMissionVoyager):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    #############################
    ### OVERRIDE FROM ObsBase ###
    #############################
Example #24
def cached_tai_from_iso(s):
    return julian.tai_from_iso(s)
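Despite its name, cached_tai_from_iso as shown does no caching; presumably a memoizing decorator was stripped from the snippet. One way to get the behavior the name implies:

import functools

@functools.lru_cache(maxsize=None)
def cached_tai_from_iso(s):
    # Memoize repeated conversions of identical ISO strings.
    return julian.tai_from_iso(s)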
Example #25
    def __init__(self,
                 label_file,
                 times=[],
                 columns=[],
                 nostrip=[],
                 callbacks={}):
        """Constructor for a PdsTable object.

        Input:
            label_file      the path to the PDS label of the table file, or else
                            the contents of the label as a list of strings.
            columns         an optional list of the names of the columns to
                            return. If the list is empty, then every column is
                            returned.
            times           an optional list of the names of time columns to be
                            stored as floats in units of seconds TAI rather than
                            as strings.
            nostrip         an optional list of the names of string columns that
                            are not to be stripped of surrounding whitespace.
            callbacks       an optional dictionary that returns a callback
                            function given the name of a column. If a callback
                            is provided for any column, then the function is
                            called on the string value of that column before it
                            is parsed. This can be used to update known syntax
                            errors in a particular table.
        """

        # Parse the label
        self.info = PdsTableInfo(label_file)

        # Select the columns
        if len(columns) == 0:
            keys = [info.name for info in self.info.column_info_list]
        else:
            keys = columns

        # Load the table data
        with open(self.info.table_file_path, "r") as f:
            lines = f.readlines()

        table = np.array(lines, dtype=self.info.dtype0)

        # Extract the substring arrays and save in a dictionary and list...
        self.column_list = []
        self.column_dict = {}

        for key in keys:
            column_info = self.info.column_info_dict[key]
            column = table[key]

            # For multiple items...
            if column_info.items > 1:

                # Replace the column substring with a list of sub-substrings
                column.dtype = np.dtype(column_info.dtype1)

                items = []
                for i in range(column_info.items):
                    item = column["item_" + str(i)]
                    items.append(item)

                # Apply the callback function if necessary for each item
                if key in callbacks:
                    old_items = items
                    items = []
                    callback = callbacks[key]
                    for item in old_items:
                        rows = []
                        for row in item:
                            rows.append(callback(str(row)))
                        rows = np.array(rows)
                        items.append(np.array(rows))

                # Strip strings...
                if column_info.dtype2 is None and key not in nostrip:
                    old_items = items
                    items = []
                    for item in old_items:
                        rows = []
                        for row in item:
                            rows.append(str(row).strip())
                        items.append(np.array(rows))

                # ...or convert other data types
                else:
                    old_items = items
                    items = []
                    for item in old_items:
                        items.append(item.astype(column_info.dtype2))

                column = np.array(items).swapaxes(0, 1)

            # Apply the callback function if necessary
            else:
                if key in callbacks:
                    callback = callbacks[key]
                    rows = []
                    for row in column:
                        rows.append(callback(str(row)))
                    column = np.array(rows)

                # Strip strings...
                if column_info.dtype2 is None:
                    if key not in nostrip:
                        rows = []
                        for row in column:
                            rows.append(str(row).strip())
                        column = np.array(rows)

                # ...or convert other data types
                else:
                    try:
                        column = column.astype(column_info.dtype2)
                    except ValueError:
                        warnings.warn("Illegal " + column_info.dtype2 +
                                      " format in column " + column_info.name +
                                      "; values were not converted to numeric")

                # Convert time columns if necessary
                if key in times:
                    column = julian.tai_from_iso(column)

            self.column_list.append(column)
            self.column_dict[key] = column
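A hypothetical use of the constructor above, converting one column to TAI seconds and repairing a known syntax error through a callback (the label path and column names are placeholders):

def fix_double_dash(value):
    # Example callback: patch a known typo before the value is parsed.
    return value.replace('--', '-')

table = PdsTable('COISS_2001/index.lbl',
                 columns=['FILE_NAME', 'START_TIME'],
                 times=['START_TIME'],
                 callbacks={'FILE_NAME': fix_double_dash})
start_times_tai = table.column_dict['START_TIME']   # floats, seconds TAI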
################################################################################
# obs_instrument_cocirs_cube.py
#
# Defines the ObsInstrumentCOCIRSCube class, which encapsulates fields in the
# common, obs_mission_cassini, and obs_instrument_cocirs tables for volumes
# COCIRS_[01]xxx.
################################################################################

import julian

import opus_support

from obs_mission_cassini import ObsMissionCassini

_EQUINOX_DATE = julian.tai_from_iso('2009-08-11T01:40:08.914')


class ObsInstrumentCOCIRSCube(ObsMissionCassini):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _get_cube_map_projection(self):
        return self._index_col('PRODUCT_ID')[-1].lower()

    def _is_ring_map_projection(self):
        return self._get_cube_map_projection() == 'r'

    def _is_equi_map_projection(self):
        return self._get_cube_map_projection() == 'e'

    # Equinox: 2009-08-11T01:40:08.914
Example #27
    lowest_detectable_opacity = data_label['LOWEST_DETECTABLE_OPACITY']
    highest_detectable_opacity = data_label['HIGHEST_DETECTABLE_OPACITY']
    spacecraft_clock_start_count = data_label['SPACECRAFT_CLOCK_START_COUNT']
    spacecraft_clock_stop_count = data_label['SPACECRAFT_CLOCK_STOP_COUNT']

    if lowest_detectable_opacity > highest_detectable_opacity:
        print(f'{filespec} Opacities are in wrong order '
              f'{lowest_detectable_opacity} {highest_detectable_opacity}')
        (lowest_detectable_opacity, highest_detectable_opacity) = (
            highest_detectable_opacity, lowest_detectable_opacity)

    if spacecraft_clock_stop_count < spacecraft_clock_start_count:
        # print(f'{clean_filespec} *** Spacecraft clock count in wrong order {spacecraft_clock_start_count} {spacecraft_clock_stop_count} - swapping')
        spacecraft_clock_start_count, spacecraft_clock_stop_count = (
            spacecraft_clock_stop_count, spacecraft_clock_start_count
        )

    start_time_sec = julian.tai_from_iso(start_time)
    stop_time_sec = julian.tai_from_iso(stop_time)

    if stop_time_sec - start_time_sec > 86400 * 10:
        print(f'{clean_filespec} *** Bad start/stop time {start_time} {stop_time} - looking at data file')
        # start_time_sec = None
        # stop_time_sec = None
        with open(data_label_info.table_file_path, 'r') as tab_fp:
            csv_reader = csv.reader(tab_fp, delimiter=',')
            tab_rows = [[y.strip('"').strip() for y in x] for x in csv_reader]
        tab_start_time_sec = julian.tai_from_tdb(float(tab_rows[0][6]))
        tab_ring_start_time_sec = julian.tai_from_tdb(float(tab_rows[0][7]))
        tab_stop_time_sec = julian.tai_from_tdb(float(tab_rows[-1][6]))
        tab_ring_stop_time_sec = julian.tai_from_tdb(float(tab_rows[-1][7]))
        if tab_start_time_sec > tab_stop_time_sec:
            (tab_start_time_sec, tab_stop_time_sec) = (tab_stop_time_sec, tab_start_time_sec)
Example #28
def write_uvis_pds4_label(datafile, pds3_label):

    # Read the PDS3 label and the VICAR header, fixing known syntax errors
    label_text = open(pds3_label).read()
    label_text = label_text.replace('\r','') # pyparsing is not set up for <CR>
    label = pdsparser.PdsLabel.from_string(label_text).as_dict()

    # Define the lookup dictionary
    lookup = label.copy()
    is_qube = ('QUBE' in label)
    lookup['is_qube'] = is_qube
    if is_qube:
        lookup.update(label['QUBE'])
        pds3_filename = label['^QUBE']
    elif ('SPECTRUM' in label):
        lookup.update(label['SPECTRUM'])
        pds3_filename = label['^SPECTRUM']
    else:
        lookup.update(label['TIME_SERIES'])
        pds3_filename = label['^TIME_SERIES']

    # Define all the derived quantities
    lookup['datafile'] = datafile
    basename = os.path.basename(datafile)
    lookup['basename'] = basename
    inst = basename.split('_')[-1][:-4]
    lookup['inst'] = inst

    desc = lookup['DESCRIPTION']
    k = desc.find('The purpose of this observation')
    if k < 0:
        purpose_str = ''
    else:
        purpose_str = desc[k:]
    lookup['purpose_str'] = purpose_str
    lookup['purpose'] = uvis_purpose(purpose_str, lookup['TARGET_NAME'])

    observation_id = OBSERVATION_IDS[basename[:14]]
    lookup['OBSERVATION_ID'] = observation_id

    # For EUV/FUV...
    if inst in ('euv', 'fuv'):

      # Find window(s)
      if 'UL_CORNER_SPATIAL' in lookup:
        lookup['UL_CORNER_LINE'] = [lookup['UL_CORNER_SPATIAL' ]]
        lookup['UL_CORNER_BAND'] = [lookup['UL_CORNER_SPECTRAL']]
        lookup['LR_CORNER_LINE'] = [lookup['LR_CORNER_SPATIAL' ]]
        lookup['LR_CORNER_BAND'] = [lookup['LR_CORNER_SPECTRAL']]
        lookup['BAND_BIN'] = [1]
        lookup['LINE_BIN'] = [1]

      if not isinstance(lookup['UL_CORNER_LINE'], list):
        lookup['UL_CORNER_LINE'] = [lookup['UL_CORNER_LINE']]
        lookup['UL_CORNER_BAND'] = [lookup['UL_CORNER_BAND']]
        lookup['LR_CORNER_LINE'] = [lookup['LR_CORNER_LINE']]
        lookup['LR_CORNER_BAND'] = [lookup['LR_CORNER_BAND']]
        lookup['BAND_BIN'] = [lookup['BAND_BIN']]
        lookup['LINE_BIN'] = [lookup['LINE_BIN']]

      # Locate the calibration file(s)
      lookup['calibration_files'] = []
      for version in ('3','4','5'):
        calpath = (datafile.replace('data_raw_','calibration_data_')[:-8] +
                   '_cal_' + version + datafile[-8:])
        if os.path.exists(calpath):
            lookup['calibration_files'].append(os.path.basename(calpath))

    # For HDAC, determine mode
    if inst == 'hdac':
        if sum(lookup['D_LEVEL']) + sum(lookup['H_LEVEL']) == 0:
            lookup['mode'] = 'photometer'
        else:
            lookup['mode'] = 'modulation'

    # For HDAC in modulation mode, count trailing zeros
    if inst == 'hdac':
      if lookup['mode'] == 'modulation':
        data = np.fromfile(datafile, dtype='uint16')
        nonzeros = len(data)
        while nonzeros > 0 and data[nonzeros-1] == 0:
            nonzeros -= 1

        zeros = len(data) - nonzeros
        if zeros:
            print('%d trailing HDAC zeros found:' % zeros, datafile)

        lookup['zeros'] = zeros
        lookup['nonzeros'] = nonzeros
      else:
        lookup['zeros'] = 0
        lookup['nonzeros'] = lookup['ROWS']

    # For HDAC, determine the duration
    if inst == 'hdac':
        lookup['duration'] = lookup['nonzeros'] * 0.125

    # For HSP, determine start time, reference time, duration
    if inst == 'hsp':
        texp = lookup['SAMPLING_PARAMETER_INTERVAL'] / 1000.
        lookup['texp'] = texp

        INIT_SCLKS()
        old_start_tai = julian.tai_from_iso(lookup['START_TIME']) - texp
        start_sclk = OLD_SCLK_FROM_TAI(old_start_tai)
        new_start_tai = NEW_TAI_FROM_SCLK(start_sclk)
        lookup['shift_secs'] = new_start_tai - old_start_tai

        stop_sclk = start_sclk + texp * lookup['ROWS']
        new_stop_tai = NEW_TAI_FROM_SCLK(stop_sclk)
        lookup['delta'] = (new_stop_tai - new_start_tai) / lookup['ROWS']

        lookup['new_start_tai'] = new_start_tai
        lookup['new_stop_tai'] = new_stop_tai
        lookup['start_sclk'] = start_sclk

    # Special care for target identifications
    target_info = uvis_target_info(label['TARGET_NAME'], observation_id,
                                   purpose_str, datafile)

    target_names    = []
    target_alts     = []
    target_naif_ids = []
    target_types    = []
    target_lids     = []
    primary_names    = []
    primary_naif_ids = []
    primary_lids     = []

    for k in range(len(target_info)):
        target_names.append(target_info[k][0])
        target_alts.append(target_info[k][1])
        target_naif_ids.append(get_naif_id(target_info[k][1]))
        target_types.append(target_info[k][2])
        primary_names.append(target_info[k][3])
        target_lids.append(target_info[k][4])

        if primary_names[-1] == 'N/A':
            primary_naif_ids.append('N/A')
            primary_lids.append('N/A')
        else:
            primary_info = TARGET_DICT[primary_names[-1].upper()]
            primary_naif_ids.append(get_naif_id(primary_info[1]))
            primary_lids.append(primary_info[4])

    lookup['target_names'    ] = target_names
    lookup['target_alts'     ] = target_alts
    lookup['target_naif_ids' ] = target_naif_ids
    lookup['target_types'    ] = target_types
    lookup['target_lids'     ] = target_lids 
    lookup['primary_names'   ] = primary_names
    lookup['primary_naif_ids'] = primary_naif_ids
    lookup['primary_lids'    ] = primary_lids

    # Write the label
    labelfile = datafile[:-4] + '.xml'

    if inst in ('euv', 'fuv'):
        EUV_FUV_TEMPLATE.write(lookup, labelfile)
    elif inst == 'hdac':
        HDAC_TEMPLATE.write(lookup, labelfile)
    else:
        HSP_TEMPLATE.write(lookup, labelfile)
Example #29
    def __init__(self, label_file, times=[], columns=[], nostrip=[],
                       callbacks={}):
        """Constructor for a PdsTable object.

        Input:
            label_file      the path to the PDS label of the table file, or else
                            the contents of the label as a list of strings.
            columns         an optional list of the names of the columns to
                            return. If the list is empty, then every column is
                            returned.
            times           an optional list of the names of time columns to be
                            stored as floats in units of seconds TAI rather than
                            as strings.
            nostrip         an optional list of the names of string columns that
                            are not to be stripped of surrounding whitespace.
            callbacks       an optional dictionary that returns a callback
                            function given the name of a column. If a callback
                            is provided for any column, then the function is
                            called on the string value of that column before it
                            is parsed. This can be used to update known syntax
                            errors in a particular table.
        """

        # Parse the label
        self.info = PdsTableInfo(label_file)

        # Select the columns
        if len(columns) == 0:
            keys = [info.name for info in self.info.column_info_list]
        else:
            keys = columns

        # Load the table data
        with open(self.info.table_file_path, "r") as f:
            lines = f.readlines()

        table = np.array(lines, dtype=self.info.dtype0)

        # Extract the substring arrays and save in a dictionary and list...
        self.column_list = []
        self.column_dict = {}

        for key in keys:
            column_info = self.info.column_info_dict[key]
            column = table[key]

            # For multiple items...
            if column_info.items > 1:

                # Replace the column substring with a list of sub-substrings
                column.dtype = np.dtype(column_info.dtype1)

                items = []
                for i in range(column_info.items):
                    item = column["item_" + str(i)]
                    items.append(item)

                # Apply the callback function if necessary for each item
                if key in callbacks:
                    old_items = items
                    items = []
                    callback = callbacks[key]
                    for item in old_items:
                        rows = []
                        for row in item:
                            rows.append(callback(str(row)))
                        rows = np.array(rows)
                        items.append(np.array(rows))

                # Strip strings...
                if column_info.dtype2 is None and key not in nostrip:
                    old_items = items
                    items = []
                    for item in old_items:
                        rows = []
                        for row in item:
                            rows.append(str(row).strip())
                        items.append(np.array(rows))

                # ...or convert other data types
                else:
                    old_items = items
                    items = []
                    for item in old_items:
                        items.append(item.astype(column_info.dtype2))

                column = np.array(items).swapaxes(0,1)

            # Apply the callback function if necessary
            else:
                if key in callbacks:
                    callback = callbacks[key]
                    rows = []
                    for row in column:
                        rows.append(callback(str(row)))
                    column = np.array(rows)

                # Strip strings...
                if column_info.dtype2 is None:
                    if key not in nostrip:
                        rows = []
                        for row in column:
                            rows.append(str(row).strip())
                        column = np.array(rows)

                # ...or convert other data types
                else:
                    try:
                        column = column.astype(column_info.dtype2)
                    except ValueError:
                        warnings.warn("Illegal " + column_info.dtype2 +
                                      " format in column " + column_info.name +
                                      "; values were not converted to numeric")

                # Convert time columns if necessary
                if key in times:
                    column = julian.tai_from_iso(column)

            self.column_list.append(column)
            self.column_dict[key] = column
Example #30
def tai_from_iso(string):
    return julian.tai_from_iso(string, strip=True)
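PDS table fields are fixed-width and often blank-padded, which is presumably why this wrapper passes strip=True. An illustrative call:

padded = ' 2004-183T12:34:56 '   # as read from a fixed-width table field
tai = tai_from_iso(padded)       # strip=True tolerates the surrounding blanks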