예제 #1
0
def test_encode_cdftt2000():
    """A TT2000 value encodes to ISO-8601 by default, legacy format otherwise."""
    encoded = cdfepoch.encode(186999622360321123)
    assert encoded == '2005-12-04T20:19:18.176321123'
    legacy = cdfepoch.encode([500000000100, 123456789101112131],
                             iso_8601=False)
    expected = ('01-Jan-2000 12:07:15.816.000.100',
                '30-Nov-2003 09:32:04.917.112.131')
    for pos, want in enumerate(expected):
        assert legacy[pos] == want
예제 #2
0
def test_encode_cdfepoch():
    """CDF_EPOCH values encode to ISO-8601 by default, legacy format otherwise."""
    encoded = cdfepoch.encode([62285326000000.0, 62985326000000.0])
    expected = ('1973-09-28T23:26:40.000', '1995-12-04T19:53:20.000')
    for pos, want in enumerate(expected):
        assert encoded[pos] == want

    legacy = cdfepoch.encode(62975326000002.0, iso_8601=False)
    assert legacy == '11-Aug-1995 02:06:40.002'
예제 #3
0
def test_parse_cdftt2000():
    """encode() and parse() must round-trip a TT2000 value exactly."""
    original = 131415926535793238
    encoded = cdfepoch.encode(original)
    assert encoded == "2004-03-01T12:24:22.351793238"
    roundtripped = cdfepoch.parse(encoded)
    assert roundtripped == original
    # to_datetime truncates sub-microsecond precision.
    assert cdfepoch().to_datetime(roundtripped) == [datetime(2004, 3, 1, 12, 24, 22, 351793)]
예제 #4
0
def test_parse_cdfepoch16():
    """encode() and parse() must round-trip a CDF_EPOCH16 (complex) value."""
    original = 53467976543.0 + 543218654100j
    encoded = cdfepoch.encode(original)
    assert encoded == "1694-05-01T07:42:23.543218654100"
    roundtripped = cdfepoch.parse(encoded)
    assert roundtripped == original
    # to_datetime truncates sub-microsecond precision.
    assert cdfepoch().to_datetime(roundtripped) == [datetime(1694, 5, 1, 7, 42, 23, 543218)]
예제 #5
0
def test_compute_cdftt2000(dtime):
    """compute() followed by breakdown() must reproduce the input fields."""
    fields = [dtime.year, dtime.month, dtime.day,
              dtime.hour, dtime.minute, dtime.second,
              dtime.microsecond // 1000,  # Millisecond
              randint(0, 999),            # Microsecond
              randint(0, 999),            # Nanosecond
              ]
    recovered = cdfepoch.breakdown(cdfepoch.compute(fields))
    for pos, value in enumerate(recovered):
        assert value == fields[pos], f'Time {fields} was not equal to {recovered}'
예제 #6
0
def test_unixtime_roundtrip(tzone):
    """Unix-time conversion must not depend on the process TZ setting."""
    saved_environ = os.environ.copy()
    try:
        os.environ['TZ'] = tzone
        epoch = cdfepoch.compute_tt2000([[2000, 1, 1]])
        assert cdfepoch.unixtime(epoch) == [946684800.0]
    finally:
        # Restore the environment exactly as it was, whatever happened above.
        os.environ.clear()
        os.environ.update(saved_environ)
예제 #7
0
def test_compute_cdfepoch(dtime):
    '''
    Using random numbers for the compute tests.

    Round-trips a [year, month, day, hour, minute, second, millisecond]
    vector through compute() and breakdown() and checks every field.
    '''
    random_time = [dtime.year, dtime.month, dtime.day,
                   dtime.hour, dtime.minute, dtime.second,
                   dtime.microsecond // 1000]  # Millisecond
    x = cdfepoch.breakdown(cdfepoch.compute(random_time))
    # enumerate() replaces the manual `i = 0 ... i += 1` counter (same semantics).
    for i, t in enumerate(x):
        assert t == random_time[i], f'Time {random_time} was not equal to {x}'
예제 #8
0
def test_compute_cdfepoch16(dtime):
    """Round-trip a 10-field CDF_EPOCH16 time vector through compute/breakdown."""
    random_time = [dtime.year, dtime.month, dtime.day,
                   dtime.hour, dtime.minute, dtime.second,
                   dtime.microsecond // 1000,  # Millisecond
                   randint(0, 999),     # Microsecond
                   randint(0, 999),     # Nanosecond
                   randint(0, 999),     # Picosecond
                   ]
    x = cdfepoch.breakdown(cdfepoch.compute(random_time))
    # enumerate() replaces the manual `i = 0 ... i += 1` counter (same semantics).
    for i, t in enumerate(x):
        assert t == random_time[i], f'Time {random_time} was not equal to {x}'
예제 #9
0
def test_findepochrange_cdfepoch():
    """findepochrange() returns indices bracketing [starttime, endtime]."""
    bounds = cdfepoch.parse(["2013-12-01T12:24:22.000",
                             "2014-12-01T12:24:22.000"])
    time_array = np.arange(bounds[0], bounds[1], step=1000000)

    test_start = [2014, 8, 1, 8, 1, 54, 123]
    test_end = [2018, 1, 1, 1, 1, 1, 1]
    index = cdfepoch.findepochrange(time_array, starttime=test_start, endtime=test_end)
    # The first selected sample is the earliest one at or after test_start,
    # and its predecessor lies at or before test_start.
    assert time_array[index[0]] >= cdfepoch.compute(test_start)
    assert time_array[index[0] - 1] <= cdfepoch.compute(test_start)
    # The last selected sample does not exceed test_end.
    assert time_array[index[-1]] <= cdfepoch.compute(test_end)
예제 #10
0
def test_encode_cdfepoch16():
    '''
    cdf_encode_epoch16(dcomplex(63300946758.000000, 176214648000.00000)) in IDL
    returns 04-Dec-2005 20:39:28.176.214.654.976

    However, I believe this IDL routine is bugged.  This website:
    https://www.epochconverter.com/seconds-days-since-y0
    shows a correct answer.
    '''
    single = cdfepoch.encode(np.complex128(63300946758.000000 + 176214648000.00000j))
    assert single == '2005-12-04T20:19:18.176214648000'
    pair = cdfepoch.encode(np.complex128([33300946758.000000 + 106014648000.00000j,
                                          61234543210.000000 + 000011148000.00000j]),
                           iso_8601=False)
    expected = ('07-Apr-1055 14:59:18.106.014.648.000',
                '12-Jun-1940 03:20:10.000.011.148.000')
    for pos, want in enumerate(expected):
        assert pair[pos] == want
예제 #11
0
def test_compute_cdftt2000():
    """Round-trip a random valid TT2000 time vector through compute/breakdown."""
    # FIX: the year was previously randint(0, 2018), which can produce years
    # outside the range supported by CDF TT2000 (1709-2292; see
    # https://spdf.gsfc.nasa.gov/pub/software/cdf/doc/cdf371/cdf371ug.pdf
    # page 55) and made the test flaky.
    random_time = [
        randint(1709, 2292),  # Year
        randint(1, 12),       # Month
        randint(1, 28),       # Date
        randint(0, 23),       # Hour
        randint(0, 59),       # Minute
        randint(0, 59),       # Second
        randint(0, 999),      # Millisecond
        randint(0, 999),      # Microsecond
        randint(0, 999),      # Nanosecond
    ]
    x = cdfepoch.breakdown(cdfepoch.compute(random_time))
    # enumerate() replaces the manual `i = 0 ... i += 1` counter.
    for i, t in enumerate(x):
        assert t == random_time[i], 'Time {} was not equal to {}'.format(
            random_time, x)
예제 #12
0
def test_compute_cdftt2000():
    """Round-trip a random valid TT2000 time vector through compute/breakdown."""
    # These are the supported years for CDF files; see
    # https://spdf.gsfc.nasa.gov/pub/software/cdf/doc/cdf371/cdf371ug.pdf
    # page 55
    random_time = [
        randint(1709, 2292),  # Year
        randint(1, 12),       # Month
        randint(1, 28),       # Date
        randint(0, 23),       # Hour
        randint(0, 59),       # Minute
        randint(0, 59),       # Second
        randint(0, 999),      # Millisecond
        randint(0, 999),      # Microsecond
        randint(0, 999),      # Nanosecond
    ]
    x = cdfepoch.breakdown(cdfepoch.compute(random_time))
    # enumerate() replaces the manual `i = 0 ... i += 1` counter.
    for i, t in enumerate(x):
        assert t == random_time[i], 'Time {} was not equal to {}'.format(
            random_time, x)
예제 #13
0
def test_breakdown_cdftt2000():
    """A TT2000 value breaks down into nine calendar/sub-second components."""
    components = cdfepoch.breakdown(123456789101112131)
    # year, month, day, hour, minute, second, ms, us, ns
    expected = [2003, 11, 30, 9, 32, 4, 917, 112, 131]
    for pos, want in enumerate(expected):
        assert components[pos] == want
예제 #14
0
def test_breakdown_cdfepoch16():
    """A CDF_EPOCH16 (complex) value breaks down into ten components."""
    components = cdfepoch.breakdown(np.complex128(63300946758.000000 + 176214648000.00000j))
    # year, month, day, hour, minute, second, ms, us, ns, ps
    expected = [2005, 12, 4, 20, 19, 18, 176, 214, 648, 0]
    for pos, want in enumerate(expected):
        assert components[pos] == want
예제 #15
0
 def get_dataset_raw(self, keys, WFR_file_id=0):
     """ Convert the raw data to dict format.

     Accumulates "Epoch" datetimes and the requested variables from every
     open CDF file object into a single dict; optionally attaches WFR info.

     Parameters
     ----------
     keys : iterable of str
         CDF variable names to extract from each file.
     WFR_file_id : int or None
         File id passed to get_WFR_info; None skips the WFR lookup.

     Returns
     -------
     dict
         {"Epoch": [datetime, ...], <key>: array, ..., "WFR": ...}
     """
     self.epoch = None
     o = {"Epoch": []}
     for f in self.files["file_objects"]:
         # CDFepoch.breakdown yields [y, m, d, H, M, S, ...] per record.
         dates = [
             dt.datetime(i[0], i[1], i[2], i[3], i[4], i[5])
             for i in CDFepoch.breakdown(f.varget("Epoch"))
         ]
         o["Epoch"].extend(dates)
         if self.verbose: print(f.cdf_info())
         for key in keys:
             if key not in o.keys(): o[key] = f.varget(key)[:]
             # BUG FIX: np.concatenate takes a *sequence* of arrays as its
             # first argument; the old call np.concatenate(a, b) passed the
             # second array as the `axis` argument and raised a TypeError.
             else: o[key] = np.concatenate((o[key], f.varget(key)[:]))
     if WFR_file_id is not None: o["WFR"] = self.get_WFR_info(WFR_file_id)
     self.epoch = o["Epoch"]
     return o
예제 #16
0
def test_breakdown_cdfepoch():
    """breakdown() of a CDF_EPOCH array returns one component list per value."""
    rows = cdfepoch.breakdown([62285326000000.0, 62985326000000.0])
    # One [year, month, day, hour, minute, second, ms] row per input value.
    expected = [
        [1973, 9, 28, 23, 26, 40, 0],   # First in the array
        [1995, 12, 4, 19, 53, 20, 0],   # Second in the array
    ]
    for row_idx, row in enumerate(expected):
        for col_idx, want in enumerate(row):
            assert rows[row_idx][col_idx] == want
예제 #17
0
def test_findepochrange_cdftt2000():
    """findepochrange() brackets [starttime, endtime] on TT2000 samples."""
    bounds = cdfepoch.parse(["2004-03-01T12:24:22.351793238",
                             "2004-03-01T12:28:22.351793238"])
    time_array = np.arange(bounds[0], bounds[1], step=1000000)

    test_start = [2004, 3, 1, 12, 25, 54, 123, 111, 98]
    test_end = [2004, 3, 1, 12, 26, 4, 123, 456, 789]
    index = cdfepoch.findepochrange(time_array, starttime=test_start, endtime=test_end)
    # First selected sample is the earliest at or after test_start, and its
    # predecessor lies at or before test_start.
    assert time_array[index[0]] >= cdfepoch.compute(test_start)
    assert time_array[index[0] - 1] <= cdfepoch.compute(test_start)
    # Last selected sample is the latest at or before test_end, and its
    # successor lies at or after test_end.
    assert time_array[index[-1]] <= cdfepoch.compute(test_end)
    assert time_array[index[-1] + 1] >= cdfepoch.compute(test_end)
예제 #18
0
def test_findepochrange_cdfepoch16():
    """findepochrange() brackets a range on CDF_EPOCH16 (complex) samples."""
    bounds = cdfepoch.parse(["1978-03-10T03:24:22.351793238462",
                             "1978-06-13T01:28:22.338327950466"])
    real_step = int((bounds[1].real - bounds[0].real) / 1000)
    imag_step = int((bounds[1].imag - bounds[0].imag) / 1000)
    # 1000 evenly spaced complex samples starting at the parsed start time.
    time_array = [bounds[0] + complex(real_step * k, imag_step * k)
                  for k in range(0, 1000)]

    test_start = [1978, 6, 10, 3, 24, 22, 351, 793, 238, 462]
    test_end = [1978, 6, 12, 23, 11, 1, 338, 341, 416, 466]
    index = cdfepoch.findepochrange(time_array, starttime=test_start, endtime=test_end)

    # First selected sample is the earliest at or after test_start, and its
    # predecessor lies at or before test_start (real parts carry the seconds).
    assert time_array[index[0]].real >= cdfepoch.compute(test_start).real
    assert time_array[index[0] - 1].real <= cdfepoch.compute(test_start).real
    assert time_array[index[-1]].real <= cdfepoch.compute(test_end).real
    assert time_array[index[-1] + 1].real >= cdfepoch.compute(test_end).real
예제 #19
0
def test_unixtime():
    """TT2000 values convert to POSIX seconds as floats."""
    converted = cdfepoch.unixtime([500000000100, 123456789101112131])
    assert converted[0] == 946728435.816
    assert converted[1] == 1070184724.917112
예제 #20
0
def _convert_cdf_time_types(data,
                            atts,
                            properties,
                            to_datetime=False,
                            to_unixtime=False):
    '''
    Converts CDF time types into either datetime objects, unixtime, or nothing.
    If nothing, ALL CDF_EPOCH16 types are converted to CDF_EPOCH, because
    xarray can't handle int64s.

    Parameters
    ----------
    data : scalar or sequence
        Variable data; a scalar is wrapped in a one-element list.
    atts : dict
        Attribute dict; each value is a dict with 'Data_Type' and 'Data' keys.
        Note: 'UNITS' entries may be mutated in place when data is converted.
    properties : dict
        Must contain 'Data_Type_Description' naming the CDF data type.
    to_datetime : bool
        Convert time values to datetime objects.
    to_unixtime : bool
        Convert time values to unix time; wins if both flags are set.

    Returns
    -------
    tuple
        (new_data, new_atts): converted variable data and converted attributes.
    '''

    # Normalize scalars to a one-element list so len()/iteration work below.
    if not hasattr(data, '__len__'):
        data = [data]

    # The two conversions are mutually exclusive; unixtime takes precedence.
    if to_datetime and to_unixtime:
        print(
            "Cannot convert to both unixtime and datetime.  Continuing with conversion to unixtime."
        )
        to_datetime = False

    # Convert all data in the "data" variable to unixtime or datetime if needed
    data_type = properties['Data_Type_Description']
    if len(data) == 0 or data_type not in ('CDF_EPOCH', 'CDF_EPOCH16',
                                           'CDF_TIME_TT2000'):
        # Empty or non-time data passes through untouched.
        new_data = data
    else:
        if to_datetime:
            new_data = cdfepoch.to_datetime(data)
            if 'UNITS' in atts:
                atts['UNITS']['Data'] = 'Datetime (UTC)'
        elif to_unixtime:
            new_data = cdfepoch.unixtime(data)
            if 'UNITS' in atts:
                atts['UNITS']['Data'] = 'seconds'
        else:
            if data_type == 'CDF_EPOCH16':
                # Collapse EPOCH16 to EPOCH by recomputing from the first seven
                # broken-down components (year..millisecond).
                # NOTE(review): for array input, breakdown() may return one row
                # per record, in which case [0:7] would slice records rather
                # than components — confirm against cdfepoch.breakdown's shape.
                new_data = cdfepoch.compute(cdfepoch.breakdown(data)[0:7])
            else:
                new_data = data

    # Convert all the attributes in the "atts" dictionary to unixtime or datetime if needed
    new_atts = {}
    for att in atts:
        data_type = atts[att]['Data_Type']
        data = atts[att]['Data']
        if not hasattr(data, '__len__'):
            data = [data]
        if len(data) == 0 or data_type not in ('CDF_EPOCH', 'CDF_EPOCH16',
                                               'CDF_TIME_TT2000'):
            new_atts[att] = data
        else:
            if to_datetime:
                new_atts[att] = cdfepoch.to_datetime(data)
            elif to_unixtime:
                new_atts[att] = cdfepoch.unixtime(data)
            else:
                if data_type == 'CDF_EPOCH16':
                    # Same EPOCH16 -> EPOCH collapse as for the variable data.
                    new_atts[att] = cdfepoch.compute(
                        cdfepoch.breakdown(data)[0:7])
                else:
                    new_atts[att] = data

    return new_data, new_atts
예제 #21
0
def test_parse_cdfepoch():
    """parse() inverts encode() for a CDF_EPOCH value (float comparison)."""
    encoded = cdfepoch.encode(62567898765432.0)
    assert encoded == "1982-09-12T11:52:45.432"
    roundtripped = cdfepoch.parse(encoded)
    assert roundtripped == approx(62567898765432.0)
예제 #22
0
 def time_epoch_encode(self, to_np):
     """Benchmark: encode the stored CDF_EPOCH array."""
     epochs = self.epochs
     cdfepoch.encode(epochs)
예제 #23
0
 def time_epoch_to_datetime_tt2000(self, to_np):
     """Benchmark: convert the stored TT2000 array to datetimes."""
     epochs = self.epochs_tt2000
     cdfepoch.to_datetime(epochs)
예제 #24
0
 def time_epoch_to_datetime(self, to_np):
     """Benchmark: convert the stored CDF_EPOCH array to datetimes."""
     epochs = self.epochs
     cdfepoch.to_datetime(epochs)
예제 #25
0
File: cdf.py  Project: hayesla/sunpy
def read_cdf(fname):
    """
    Read a CDF file that follows the ISTP/IACG guidelines.

    Parameters
    ----------
    fname : path-like
        Location of single CDF file to read.

    Returns
    -------
    list[GenericTimeSeries]
        A list of time series objects, one for each unique time index within
        the CDF file.

    References
    ----------
    Space Physics Guidelines for CDF https://spdf.gsfc.nasa.gov/sp_use_of_cdf.html
    """
    cdf = cdflib.CDF(str(fname))

    # Extract the time varying variables
    cdf_info = cdf.cdf_info()
    meta = cdf.globalattsget()
    # r-variables and z-variables together form the full variable list.
    all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables']
    var_attrs = {key: cdf.varattsget(key) for key in all_var_keys}
    # Get keys that depend on time
    # (per ISTP, DEPEND_0 names the time variable a variable is indexed by).
    var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var]]

    # Get unique time index keys
    time_index_keys = sorted(set([var_attrs[var]['DEPEND_0'] for var in var_keys]))

    all_ts = []
    # For each time index, construct a GenericTimeSeries
    for index_key in time_index_keys:
        try:
            index = cdf.varget(index_key)
        except ValueError:
            # Empty index for cdflib >= 0.3.20
            continue
        if index is None:
            # Empty index for cdflib <0.3.20
            continue
        # TODO: use to_astropy_time() instead here when we drop pandas in timeseries
        index = CDFepoch.to_datetime(index)
        df = pd.DataFrame(index=pd.DatetimeIndex(name=index_key, data=index))
        units = {}

        for var_key in sorted(var_keys):
            attrs = var_attrs[var_key]
            # Only variables indexed by the current time index go in this frame.
            if attrs['DEPEND_0'] != index_key:
                continue

            # Get data
            # (Last_Rec == -1 means the variable holds zero records.)
            if cdf.varinq(var_key)['Last_Rec'] == -1:
                log.debug(f'Skipping {var_key} in {fname} as it has zero elements')
                continue

            data = cdf.varget(var_key)
            # Get units
            if 'UNITS' in attrs:
                unit_str = attrs['UNITS']
                try:
                    unit = u.Unit(unit_str)
                except ValueError:
                    # Fall back to a hand-maintained mapping for unit strings
                    # astropy cannot parse, else warn and use dimensionless.
                    if unit_str in _known_units:
                        unit = _known_units[unit_str]
                    else:
                        warn_user(f'astropy did not recognize units of "{unit_str}". '
                                  'Assigning dimensionless units. '
                                  'If you think this unit should not be dimensionless, '
                                  'please raise an issue at https://github.com/sunpy/sunpy/issues')
                        unit = u.dimensionless_unscaled
            else:
                warn_user(f'No units provided for variable "{var_key}". '
                          'Assigning dimensionless units.')
                unit = u.dimensionless_unscaled

            if data.ndim == 2:
                # Multiple columns, give each column a unique label
                for i, col in enumerate(data.T):
                    df[var_key + f'_{i}'] = col
                    units[var_key + f'_{i}'] = unit
            else:
                # Single column
                df[var_key] = data
                units[var_key] = unit

        all_ts.append(GenericTimeSeries(data=df, units=units, meta=meta))

    if not len(all_ts):
        log.debug(f'No data found in file {fname}')
    return all_ts
예제 #26
0
File: cdf.py  Project: alasdairwilson/sunpy
def read_cdf(fname):
    """
    Read a CDF file that follows the ISTP/IACG guidelines.

    Parameters
    ----------
    fname : path-like
        Location of single CDF file to read.

    Returns
    -------
    list[GenericTimeSeries]
        A list of time series objects, one for each unique time index within
        the CDF file.

    References
    ----------
    Space Physics Guidelines for CDF https://spdf.gsfc.nasa.gov/sp_use_of_cdf.html
    """
    cdf = cdflib.CDF(str(fname))

    # Extract the time varying variables
    cdf_info = cdf.cdf_info()
    meta = cdf.globalattsget()
    # r-variables and z-variables together form the full variable list.
    all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables']
    var_attrs = {key: cdf.varattsget(key) for key in all_var_keys}
    # Get keys that depend on time
    # (per ISTP, DEPEND_0 names the time variable a variable is indexed by).
    var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var]]

    # Get unique time index keys
    time_index_keys = sorted(
        set([var_attrs[var]['DEPEND_0'] for var in var_keys]))

    all_ts = []
    # For each time index, construct a GenericTimeSeries
    for index_key in time_index_keys:
        index = cdf.varget(index_key)
        if index is None:
            # FIX: varget returns None for an empty index; previously this
            # crashed in CDFepoch.to_datetime.
            continue
        # TODO: use to_astropy_time() instead here when we drop pandas in timeseries
        index = CDFepoch.to_datetime(index)
        df = pd.DataFrame(index=pd.DatetimeIndex(name=index_key, data=index))
        units = {}

        for var_key in sorted(var_keys):
            attrs = var_attrs[var_key]
            if attrs['DEPEND_0'] != index_key:
                continue

            # Get data
            data = cdf.varget(var_key)
            if data is None:
                # FIX: a variable with zero records yields None; previously
                # this crashed on `data.ndim` below.
                continue
            # Get units
            # FIX: a missing UNITS attribute used to raise KeyError; treat it
            # like an empty/unitless declaration instead.
            unit = attrs.get('UNITS', '')
            if unit in ['None', '', 'unitless']:
                unit = u.dimensionless_unscaled
            else:
                unit = u.Unit(unit)

            if data.ndim == 2:
                # Multiple columns, give each column a unique label
                for i, col in enumerate(data.T):
                    df[var_key + f'_{i}'] = col
                    units[var_key + f'_{i}'] = unit
            else:
                # Single column
                df[var_key] = data
                units[var_key] = unit

        all_ts.append(GenericTimeSeries(data=df, units=units, meta=meta))

    return all_ts