예제 #1
0
파일: iss_fpmu.py 프로젝트: pysat/pysatNASA
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    ackn_str = ' '.join(('Data provided through NASA CDAWeb.  Contact',
                         '[email protected] for support and use.'))
    self.acknowledgements = ackn_str
    logger.info(ackn_str)

    # Two citations; the embedded '\n' after the first DOI separates them
    ref_parts = ('V. N. Coffey et al., "Validation of the',
                 'Plasma Densities and Temperatures From',
                 'the ISS Floating Potential Measurement',
                 'Unit," in IEEE Transactions on Plasma',
                 'Science, vol. 36, no. 5, pp. 2301-2308,',
                 'Oct. 2008,',
                 'doi: 10.1109/TPS.2008.2004271.\n',
                 'A. Barjatya, C.M. Swenson, D.C.',
                 'Thompson, and K.H. Wright Jr., Data',
                 'analysis of the Floating Potential',
                 'Measurement Unit aboard the',
                 'International Space Station, Rev. Sci.',
                 'Instrum. 80, 041301 (2009),',
                 'https://doi.org/10.1063/1.3116085')
    self.references = ' '.join(ref_parts)

    return
예제 #2
0
    def _report_current_orbit(self):
        """Log the currently loaded orbit number at the info level."""

        # The internal counter is one-indexed, but orbits are reported
        # zero-indexed, hence the subtraction
        logger.info('Loaded Orbit: {:d}'.format(self._current - 1))
        return
예제 #3
0
파일: de2_rpa.py 프로젝트: pysat/pysatNASA
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    # Use the shared DE2 acknowledgements and the LANG instrument references
    self.acknowledgements = mm_de2.ackn_str
    self.references = mm_de2.refs['lang']
    logger.info(self.acknowledgements)
    return
예제 #4
0
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """
    self.acknowledgements = mm_cnofs.ackn_str
    logger.info(self.acknowledgements)

    # Combine mission-level and VEFI-specific references
    ref_parts = (mm_cnofs.refs['mission'], mm_cnofs.refs['vefi'])
    self.references = '\n'.join(ref_parts)

    return
예제 #5
0
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    # Set the appropriate acknowledgements and references
    ackn = mm_ace.acknowledgements()
    self.acknowledgements = ackn
    self.references = mm_ace.references(self.name)

    logger.info(ackn)

    return
예제 #6
0
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    # SABER data use is governed by the project's Rules of the Road
    rules_url = 'https://saber.gats-inc.com/data_services.php'
    ackn_str = ' '.join(('Please see the Rules of the Road at', rules_url))

    self.acknowledgements = ackn_str
    self.references = ''
    logger.info(ackn_str)

    return
예제 #7
0
파일: omni_hro.py 프로젝트: pysat/pysatNASA
def time_shift_to_magnetic_poles(inst):
    """ OMNI data is time-shifted to bow shock. Time shifted again
    to intersections with magnetic pole.

    Parameters
    ----------
    inst : Instrument class object
        Instrument with OMNI HRO data

    Note
    ----
    Time shift calculated using distance to bow shock nose (BSN)
    and velocity of solar wind along x-direction.

    Warnings
    --------
    Use at own risk.

    """

    # Fill gaps in Vx and BSN_x so a shift can be estimated everywhere.
    # bfill()/ffill() replace fillna(method=...), which is deprecated and
    # removed in modern pandas.
    for key in ('Vx', 'BSN_x'):
        inst[key] = inst[key].interpolate('nearest')
        inst[key] = inst[key].bfill()
        inst[key] = inst[key].ffill()

    # make sure there are no gaps larger than a minute
    # ('1min' replaces the deprecated '1T' offset alias)
    inst.data = inst.data.resample('1min').interpolate('time')

    # Transit time in seconds from the bow shock nose to Earth:
    # distance (Earth radii -> km) divided by |Vx| (Vx is negative sunward)
    time_x = inst['BSN_x'] * 6371.2 / -inst['Vx']
    idx, = np.where(np.isnan(time_x))
    if len(idx) > 0:
        # Report any remaining NaN shifts before the int cast below
        logger.info(time_x[idx])
        logger.info(time_x)

    # Shift every timestamp by its own transit time
    time_x_offset = [
        pds.DateOffset(seconds=time) for time in time_x.astype(int)
    ]
    inst.data.index = [itime + offset for itime, offset
                       in zip(inst.data.index, time_x_offset)]
    inst.data = inst.data.sort_index()

    return
예제 #8
0
파일: icon_ivm.py 프로젝트: pysat/pysatNASA
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    # Mission-level and IVM-specific references are concatenated directly
    self.acknowledgements = mm_icon.ackn_str
    self.references = ''.join((mm_icon.refs['mission'], mm_icon.refs['ivm']))
    logger.info(self.acknowledgements)

    return
예제 #9
0
파일: omni_hro.py 프로젝트: pysat/pysatNASA
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    ackn = ''.join(('For full acknowledgement info, please see: ',
                    'https://omniweb.gsfc.nasa.gov/html/citing.html'))
    ref_parts = ('J.H. King and N.E. Papitashvili, Solar',
                 'wind spatial scales in and comparisons',
                 'of hourly Wind and ACE plasma and',
                 'magnetic field data, J. Geophys. Res.,',
                 'Vol. 110, No. A2, A02209,',
                 '10.1029/2004JA010649.')
    self.acknowledgements = ackn
    self.references = ' '.join(ref_parts)
    logger.info(ackn)
    return
예제 #10
0
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    # TIMED/SEE data use is governed by the project's Rules of the Road
    rules_url = 'https://www.timed.jhuapl.edu/WWW/scripts/mdc_rules.pl'
    self.acknowledgements = ' '.join(('Please see the Rules of the Road at',
                                      rules_url))
    logger.info(self.acknowledgements)

    ref_parts = ('Woods, T. N., Eparvier, F. G., Bailey,',
                 'S. M., Chamberlin, P. C., Lean, J.,',
                 'Rottman, G. J., Solomon, S. C., Tobiska,',
                 'W. K., and Woodraska, D. L. (2005),',
                 'Solar EUV Experiment (SEE): Mission',
                 'overview and first results, J. Geophys.',
                 'Res., 110, A01312,',
                 'doi:10.1029/2004JA010765.')
    self.references = ' '.join(ref_parts)

    return
예제 #11
0
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """

    logger.info(mm_gold.ack_str)

    # Channels A and B are distinct instruments sharing time stamps; warn
    # users about the possible time-index collision
    warn_parts = ('Time stamps may be non-unique because Channel A',
                  'and B are different instruments.  An upgrade to',
                  'the pysat.Constellation object is required to',
                  'solve this issue. See pysat issue #614 for more',
                  'info.')
    logger.warning(' '.join(warn_parts))

    self.acknowledgements = mm_gold.ack_str
    self.references = mm_gold.ref_str

    return
예제 #12
0
def init(self):
    """Initializes the Instrument object with instrument specific values.

    Runs once upon instantiation.

    """
    ackn_parts = ('Data provided through NASA CDAWeb',
                  'Key Parameters - Shin-Yi Su',
                  '(Institute of Space Science,',
                  'National Central University,',
                  'Taiwan, R.O.C.)')
    # Two citations; the embedded '\n' separates them
    ref_parts = ('Yeh, H.C., S.‐Y. Su, Y.C. Yeh, J.M. Wu,',
                 'R. A. Heelis, and B. J. Holt, Scientific',
                 'mission of the IPEI payload on board',
                 'ROCSAT‐1, Terr. Atmos. Ocean. Sci., 9,',
                 'suppl., 1999a.\n',
                 'Yeh, H.C., S.‐Y. Su, R.A. Heelis, and',
                 'J.M. Wu, The ROCSAT‐1 IPEI preliminary',
                 'results, Vertical ion drift statistics,',
                 'Terr. Atmos. Ocean. Sci., 10, 805,',
                 '1999b.')
    self.acknowledgements = ' '.join(ackn_parts)
    self.references = ' '.join(ref_parts)
    logger.info(self.acknowledgements)

    return
예제 #13
0
    def refresh(self):
        """Update list of files, if there are changes.

        Calls underlying list_rtn for the particular science instrument.
        Typically, these routines search in the pysat provided path,
        pysat_data_dir/platform/name/tag/,
        where pysat_data_dir is set by pysat.utils.set_data_dir(path=path).


        """

        sat = self._sat
        desc = '{platform} {name} {tag} {sat_id}'.format(platform=sat.platform,
                                                         name=sat.name,
                                                         tag=sat.tag,
                                                         sat_id=sat.sat_id)
        search_msg = " ".join(("pysat is searching for", desc, "files."))
        # Collapse doubled spaces left by empty tag / sat_id values
        search_msg = " ".join(search_msg.split())
        logger.info(search_msg)

        info = sat._list_rtn(tag=sat.tag,
                             sat_id=sat.sat_id,
                             data_path=self.data_path,
                             format_str=self.file_format)
        info = self._remove_data_dir_path(info)
        if info.empty:
            estr = "Unable to find any files that match the supplied template."
            estr += " If you have the necessary files please check pysat "
            estr += "settings and file locations (e.g. pysat.pysat_dir)."
            logger.warning(estr)
        else:
            if self.ignore_empty_files:
                self._filter_empty_files()
            logger.info('Found {ll:d} of them.'.format(ll=len(info)))

        # attach to object
        self._attach_files(info)
        # store - to disk, if enabled
        self._store()
예제 #14
0
def scatterplot(inst,
                labelx,
                labely,
                data_label,
                datalim,
                xlim=None,
                ylim=None):
    """Return scatterplot of data_label(s) as functions of labelx,y over a
    season.

    .. deprecated:: 2.2.0
      `scatterplot` will be removed in pysat 3.0.0, it will
      be added to pysatSeasons

    Parameters
    ----------
    inst : iterable of pysat.Instrument
        Season of data to plot
    labelx : string
        data product for x-axis
    labely : string
        data product for y-axis
    data_label : string, array-like of strings
        data product(s) to be scatter plotted
    datalim : numpy array
        plot limits for data_label
    xlim, ylim : array-like or NoneType
        Axis limits applied to every plot (default=None)

    Returns
    -------
    figs : list
        Returns a list of scatter plots of data_label as a function
        of labelx and labely over the season delineated by start and
        stop datetime objects.

    """

    # NOTE: the comma before the URL is required; without it the adjacent
    # string literals concatenate and the URL fuses onto "instead:"
    warnings.warn(' '.join([
        "This function is deprecated here and will be",
        "removed in pysat 3.0.0. Please use", "pysatSeasons instead:",
        "https://github.com/pysat/pysatSeasons"
    ]),
                  DeprecationWarning,
                  stacklevel=2)

    # Remember the interactive state so it can be restored before returning
    if mpl.is_interactive():
        interactive_mode = True
        # turn interactive plotting off
        plt.ioff()
    else:
        interactive_mode = False

    # create figures for plotting
    figs = []
    axs = []

    # Check for list-like behaviour of data_label
    if isinstance(data_label, str):
        data_label = [data_label]

    # One figure per data label: a 3D scatter on top, 2D scatter below
    for i in np.arange(len(data_label)):
        figs.append(plt.figure())
        ax1 = figs[i].add_subplot(211, projection='3d')
        ax2 = figs[i].add_subplot(212)
        axs.append((ax1, ax2))
        plt.suptitle(data_label[i])
        if xlim is not None:
            ax1.set_xlim(xlim)
            ax2.set_xlim(xlim)
        if ylim is not None:
            ax1.set_ylim(ylim)
            ax2.set_ylim(ylim)

    # norm method so that data may be scaled to colors appropriately
    norm = mpl.colors.Normalize(vmin=datalim[0], vmax=datalim[1])
    p = [i for i in np.arange(len(figs))]
    q = [i for i in np.arange(len(figs))]

    # Iterate over the season with a distinct loop name so the `inst`
    # argument is not shadowed mid-function
    for i, linst in enumerate(inst):
        for j, (fig, ax) in enumerate(zip(figs, axs)):
            if not linst.empty:
                check1 = len(linst.data[labelx]) > 0
                check2 = len(linst.data[labely]) > 0
                check3 = len(linst.data[data_label[j]]) > 0
                if (check1 & check2 & check3):
                    p[j] = ax[0].scatter(linst.data[labelx],
                                         linst.data[labely],
                                         linst.data[data_label[j]],
                                         zdir='z',
                                         c=linst.data[data_label[j]],
                                         norm=norm,
                                         linewidth=0,
                                         edgecolors=None)
                    q[j] = ax[1].scatter(linst.data[labelx],
                                         linst.data[labely],
                                         c=linst.data[data_label[j]],
                                         norm=norm,
                                         alpha=0.5,
                                         edgecolor=None)

    for j, (fig, ax) in enumerate(zip(figs, axs)):
        try:
            plt.colorbar(p[j], ax=ax[0], label='Amplitude (m/s)')
        except Exception:
            # Narrowed from a bare except, which would also swallow
            # KeyboardInterrupt and SystemExit
            logger.info('Tried colorbar but failed, thus no colorbar.')
        ax[0].elev = 30.

    if interactive_mode:
        # turn interactive plotting back on
        plt.ion()

    return figs
예제 #15
0
def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[],
                           methods=['all']):
    """Compare modelled and measured data

    .. deprecated:: 2.2.0
      `satellite_view_through_model` will be removed in pysat 3.0.0, it will
      be added to pysatModels

    Parameters
    ------------
    pairs : xarray.Dataset instance
        Dataset containing only the desired observation-model data pairs
    inst_name : list of strings
        ordered list of instrument measurements to compare to modelled data
    mod_name : list of strings
        ordered list of modelled data to compare to instrument measurements
    methods : list of strings
        statistics to calculate.  See Notes for accepted inputs

    Returns
    ----------
    stat_dict : dict of dicts
        Dictionary where the first layer of keys denotes the instrument data
        name and the second layer provides the desired statistics
    data_units : dict
        Dictionary containing the units for the data

    Notes
    -----
    Statistics are calculated using PyForecastTools (imported as verify).
    See notes there for more details.

    all - all statistics
    all_bias - bias, meanPercentageError, medianLogAccuracy,
               symmetricSignedBias
    accuracy - returns dict with mean squared error, root mean squared error,
               mean absolute error, and median absolute error
    scaledAccuracy - returns dict with normalized root mean squared error,
                     mean absolute scaled error, mean absolute percentage
                     error, median absolute percentage error, median
                     symmetric accuracy
    bias - scale-dependent bias as measured by the mean error
    meanPercentageError - mean percentage error
    medianLogAccuracy - median of the log accuracy ratio
    symmetricSignedBias - Symmetric signed bias, as a percentage
    meanSquaredError - mean squared error
    RMSE - root mean squared error
    meanAbsError - mean absolute error
    medAbsError - median absolute error
    nRMSE - normalized root mean squared error
    scaledError - scaled error (see PyForecastTools for references)
    MASE - mean absolute scaled error
    forecastError - forecast error (see PyForecastTools for references)
    percError - percentage error
    absPercError - absolute percentage error
    logAccuracy - log accuracy ratio
    medSymAccuracy - Scaled measure of accuracy
    meanAPE - mean absolute percentage error

    """
    import verify  # PyForecastTools
    from pysat import utils

    # NOTE: the comma before the URL is required; without it the adjacent
    # string literals concatenate and the URL fuses onto "instead:"
    warnings.warn(' '.join(["This function is deprecated here and will be",
                            "removed in pysat 3.0.0. Please use",
                            "pysatModelUtils instead:",
                            "https://github.com/pysat/pysatModelUtils"]),
                  DeprecationWarning, stacklevel=2)

    method_rout = {"bias": verify.bias, "accuracy": verify.accuracy,
                   "meanPercentageError": verify.meanPercentageError,
                   "medianLogAccuracy": verify.medianLogAccuracy,
                   "symmetricSignedBias": verify.symmetricSignedBias,
                   "meanSquaredError": verify.meanSquaredError,
                   "RMSE": verify.RMSE, "meanAbsError": verify.meanAbsError,
                   "medAbsError": verify.medAbsError, "MASE": verify.MASE,
                   "scaledAccuracy": verify.scaledAccuracy,
                   "nRMSE": verify.nRMSE, "scaledError": verify.scaledError,
                   "forecastError": verify.forecastError,
                   "percError": verify.percError, "meanAPE": verify.meanAPE,
                   "absPercError": verify.absPercError,
                   "logAccuracy": verify.logAccuracy,
                   "medSymAccuracy": verify.medSymAccuracy}

    # Map keys returned by the convenience functions to full method names
    replace_keys = {'MSE': 'meanSquaredError', 'MAE': 'meanAbsError',
                    'MdAE': 'medAbsError', 'MAPE': 'meanAPE',
                    'MdSymAcc': 'medSymAccuracy'}

    # Grouped methods for things that don't have convenience functions
    grouped_methods = {"all_bias": ["bias", "meanPercentageError",
                                    "medianLogAccuracy",
                                    "symmetricSignedBias"],
                       "all": list(method_rout.keys())}

    # Work on a copy: the default list is mutable and shared across calls,
    # so it (and any caller-supplied list) must never be modified in place.
    # Expanding into a new list also avoids the previous extend/pop pattern,
    # whose pop-by-index used stale indices when several group keys appeared.
    expanded = []
    for mm in methods:
        if mm in grouped_methods:
            # Replace a group key with the methods it stands for
            expanded.extend(grouped_methods[mm])
        else:
            expanded.append(mm)

    # Ensure there are no duplicate methods
    methods = list(set(expanded))

    # Test the input
    if pairs is None:
        raise ValueError('must provide Dataset of paired observations')

    if len(inst_name) != len(mod_name):
        raise ValueError('must provide equal number of instrument and model ' +
                         'data names for comparison')

    if not np.all([iname in pairs.data_vars.keys() for iname in inst_name]):
        raise ValueError('unknown instrument data value supplied')

    if not np.all([iname in pairs.data_vars.keys() for iname in mod_name]):
        raise ValueError('unknown model data value supplied')

    if not np.all([mm in list(method_rout.keys()) for mm in methods]):
        known_methods = list(method_rout.keys())
        known_methods.extend(list(grouped_methods.keys()))
        unknown_methods = [mm for mm in methods
                           if mm not in list(method_rout.keys())]
        raise ValueError('unknown statistical method(s) requested:\n' +
                         '{:}\nuse only:\n{:}'.format(unknown_methods,
                                                      known_methods))

    # Initialize the output
    stat_dict = {iname: dict() for iname in inst_name}
    data_units = {iname: pairs.data_vars[iname].units for iname in inst_name}

    # Cycle through all of the data types
    for i, iname in enumerate(inst_name):
        # Determine whether the model data needs to be scaled
        iscale = utils.scale_units(pairs.data_vars[iname].units,
                                   pairs.data_vars[mod_name[i]].units)
        mod_scaled = pairs.data_vars[mod_name[i]].values.flatten() * iscale

        # Flatten both data sets, since accuracy routines require 1D arrays
        inst_dat = pairs.data_vars[iname].values.flatten()

        # Ensure no NaN are used in statistics
        inum = np.where(np.isfinite(mod_scaled) & np.isfinite(inst_dat))[0]

        if inum.shape[0] < 2:
            # Not all data types can use all statistics.  Print warnings
            # instead of stopping processing.  Only valid statistics
            # will be included in output
            logger.info(
                "{:s} can't calculate stats for {:d} finite samples".format(
                    iname, inum.shape[0]))
        else:
            # Calculate all of the desired statistics
            for mm in methods:
                try:
                    stat_dict[iname][mm] = method_rout[mm](mod_scaled[inum],
                                                           inst_dat[inum])

                    # Convenience functions add layers to the output, remove
                    # these layers
                    if hasattr(stat_dict[iname][mm], "keys"):
                        for nn in stat_dict[iname][mm].keys():
                            new = replace_keys[nn] if nn in replace_keys.keys()\
                                else nn
                            stat_dict[iname][new] = stat_dict[iname][mm][nn]
                        del stat_dict[iname][mm]
                except ValueError as verr:
                    # Not all data types can use all statistics.  Print
                    # warnings instead of stopping processing.  Only valid
                    # statistics will be included in output
                    logger.warning("{:s} can't use {:s}: {:}".format(iname, mm,
                                                                     verr))
                except NotImplementedError:
                    # Not all data types can use all statistics.  Print
                    # warnings instead of stopping processing.  Only valid
                    # statistics will be included in output
                    logger.warning("{:s} can't implement {:s}".format(iname,
                                                                      mm))

    return stat_dict, data_units
예제 #16
0
def download(date_array,
             tag,
             inst_id,
             data_path=None,
             user=None,
             password=None,
             compression_type='o'):
    """Download CHAIN Data
    For tags
    Path format for daily, highrate, hourly, local:
    ftp://chain.physics.unb.ca/gps/data/tag/YYYY/DDD/YYo/
    nvd, raw, sbf:
    ftp://chain.physics.unb.ca/gps/data/nvd/STN/YYYY/MM/

    Currently only daily is confirmed to work

    Parameters
    ----------
    date_array : list of datetime.datetime
        Days to download data for
    tag : string
        daily, highrate, hourly, local, nvd, raw, sbf
    inst_id : string
        Three-character station code; when truthy, only files for this
        station are downloaded
    data_path : string or NoneType
        Directory downloaded files are saved to (default=None)
    user : string or NoneType
        CHAIN account user name (default=None)
    password : string or NoneType
        CHAIN account password (default=None)
    compression_type : string
        o - observation .Z UNIX compression
        d - Hatanaka AND UNIX compression

    Raises
    ------
    ValueError
        If the tag is unknown or account information is missing

    """

    if tag not in tags:
        # "Uknown" typo fixed in the error message
        raise ValueError('Unknown CHAIN tag')
    elif (user is None) or (password is None):
        raise ValueError('CHAIN user account information must be provided.')

    for date in date_array:
        # Previous message incorrectly said "COSMIC" for CHAIN data
        logger.info('Downloading CHAIN data for ' + date.strftime('%D'))
        sys.stdout.flush()
        yr = date.strftime('%Y')
        doy = date.strftime('%j')

        ftp = None
        try:
            # ftplib uses a hostname not a url, so the 'ftp://' is not here
            # connect to ftp server and change to desired directory
            ftp = ftplib.FTP('chain.physics.unb.ca')
            ftp.login(user, password)
            ftp_dir = ''.join(('/gps/data/', tag, '/', yr, '/', doy, '/',
                               yr[-2:], compression_type, '/'))
            ftp.cwd(ftp_dir)

            # setup list of station files to iterate through
            files = []
            ftp.retrlines('LIST', files.append)
            files = [file.split(None)[-1] for file in files]

            # iterate through files and download each one
            for file in files:
                # skip stations other than the requested one, if specified
                if inst_id and file[0:3] != inst_id:
                    continue

                # make directory if it doesn't already exist
                if not os.path.exists(data_path):
                    os.makedirs(data_path)

                save_file = os.path.join(data_path, file)
                logger.info('Downloading: ' + file + ', and saving to '
                            + save_file)
                with open(save_file, 'wb') as f:
                    ftp.retrbinary("RETR " + file, f.write)

        except ftplib.error_perm as err:
            # pass error message through and log it
            logger.info(str(err))
        finally:
            # Close this day's connection; previously only the very last
            # connection was closed, leaking one socket per date
            if ftp is not None:
                ftp.close()

    return
예제 #17
0
파일: cdaweb.py 프로젝트: pysat/pysatNASA
def download(date_array, tag=None, inst_id=None, supported_tags=None,
             remote_url='https://cdaweb.gsfc.nasa.gov', data_path=None):
    """Routine to download NASA CDAWeb CDF data.

    This routine is intended to be used by pysat instrument modules supporting
    a particular NASA CDAWeb dataset.

    Parameters
    ----------
    date_array : array_like
        Array of datetimes to download data for. Provided by pysat.
    tag : str or NoneType
        tag or None (default=None)
    inst_id : str or NoneType
        satellite id or None (default=None)
    supported_tags : dict
        dict of dicts. Keys are supported tag names for download. Value is
        a dict with 'remote_dir', 'fname'. Intended to be pre-set with
        functools.partial then assigned to new instrument code.
        (default=None)
    remote_url : string or NoneType
        Remote site to download data from
        (default='https://cdaweb.gsfc.nasa.gov')
    data_path : string or NoneType
        Path to data directory.  If None is specified, the value previously
        set in Instrument.files.data_path is used.  (default=None)

    Raises
    ------
    ValueError
        If the inst_id / tag combination is not in supported_tags

    Examples
    --------
    ::

        # download support added to cnofs_vefi.py using code below
        fn = 'cnofs_vefi_bfield_1sec_{year:4d}{month:02d}{day:02d}_v05.cdf'
        dc_b_tag = {'remote_dir': ''.join(('/pub/data/cnofs/vefi/bfield_1sec',
                                            '/{year:4d}/')),
                    'fname': fn}
        supported_tags = {'dc_b': dc_b_tag}

        download = functools.partial(nasa_cdaweb.download,
                                     supported_tags=supported_tags)

    """

    if tag is None:
        tag = ''
    if inst_id is None:
        inst_id = ''
    try:
        inst_dict = supported_tags[inst_id][tag]
    except KeyError:
        raise ValueError('inst_id / tag combo unknown.')

    # Naming scheme for files on the CDAWeb server
    remote_dir = inst_dict['remote_dir']

    # Get list of files from server
    remote_files = list_remote_files(tag=tag, inst_id=inst_id,
                                     remote_url=remote_url,
                                     supported_tags=supported_tags,
                                     start=date_array[0],
                                     stop=date_array[-1])

    # Download only requested files that exist remotely.
    # Series.iteritems() was removed in pandas 2.0; items() is equivalent.
    for date, fname in remote_files.items():
        # Format files for specific dates and download location
        formatted_remote_dir = remote_dir.format(year=date.year,
                                                 month=date.month,
                                                 day=date.day,
                                                 hour=date.hour,
                                                 min=date.minute,
                                                 sec=date.second)
        remote_path = '/'.join((remote_url.strip('/'),
                                formatted_remote_dir.strip('/'),
                                fname))

        saved_local_fname = os.path.join(data_path, fname)

        # Perform download
        logger.info(' '.join(('Attempting to download file for',
                              date.strftime('%d %B %Y'))))
        try:
            with requests.get(remote_path) as req:
                if req.status_code != 404:
                    with open(saved_local_fname, 'wb') as open_f:
                        open_f.write(req.content)
                    logger.info('Successfully downloaded {:}.'.format(
                        saved_local_fname))
                else:
                    logger.info(' '.join(('File not available for',
                                          date.strftime('%d %B %Y'))))
        except requests.exceptions.RequestException as exception:
            # str() is required here: joining the exception object itself
            # with strings raised a TypeError, masking the real error
            logger.info(' '.join((str(exception),
                                  '- File not available for',
                                  date.strftime('%d %B %Y'))))
    return
예제 #18
0
    def load(self, orbit_num):
        """Load a particular orbit into .data for loaded day.

        Parameters
        ----------
        orbit_num : int
            orbit number, 1 indexed (1-length or -1 to -length) with sign
            denoting forward or backward indexing

        Raises
        ------
        ValueError
            If index requested lies beyond the number of orbits

        Note
        ----
        A day of data must be loaded before this routine functions properly.
        If the last orbit of the day is requested, it will automatically be
        padded with data from the next day. The orbit counter will be
        reset to 1.

        """
        # Ensure data exits
        if not self.inst.empty:
            # Set up orbit metadata
            self._calc_orbits()

            # Pull out the requested orbit
            if orbit_num < 0:
                # Negative indexing consistent with numpy, -1 last,
                # -2 second to last, etc.
                orbit_num = self.num + 1 + orbit_num

            if orbit_num == self.num:
                # We get here if user asks for last orbit. This cal is first to
                # trap case where there is only one orbit (self.num=1), which
                # needs to be treated differently than a orbit=1 call
                if self.num != 1:
                    # More than one orbit, go back one (simple call) and
                    # then forward doing full logic for breaks across day
                    self._get_basic_orbit(self.num - 1)
                    self.next()
                else:
                    # At most one complete orbit in the file, check if we are
                    # close to beginning or end of day
                    date = self.inst.date
                    delta_start = self.inst.index[-1] - date
                    delta_end = (date + dt.timedelta(days=1)
                                 - self.inst.index[0])

                    if delta_start <= self.orbit_period * 1.05:
                        # We are near the beginning. Load the previous file,
                        # then go forward one orbit
                        self.inst.prev()
                        self.next()
                        if self.inst.index[-1] < date + delta_start:
                            # We could go back a day, iterate over orbit, as
                            # above, and the data we have is the wrong day.
                            # In this case, move forward again.  This happens
                            # when previous day doesn't have data near end of
                            # the day
                            self.next()

                    elif delta_end <= self.orbit_period * 1.05:
                        # Near end; load next file, then go back one orbit
                        self.inst.next()
                        self.prev()
                        if self.inst.index[0] > (date - delta_end
                                                 + dt.timedelta(days=1)):
                            # We could go forward a day, iterate over orbit
                            # as above, and the data we have is the wrong day.
                            # In this case, move back again. This happens when
                            # next day doesn't have data near beginning of the
                            # day
                            self.prev()
                    else:
                        # Not near beginning or end, just get the last orbit
                        # available (only one)
                        self._get_basic_orbit(-1)
            elif orbit_num == 1:
                # User asked for first orbit
                try:
                    # Orbit could start file previous; check for this condition
                    # and store the real date user wants
                    true_date = self.inst.date

                    # Go back a day
                    self.inst.prev()

                    # If and else added because of Instruments that have large
                    # gaps (e.g., C/NOFS).  In this case, prev can return
                    # empty data
                    if not self.inst.empty:
                        # Get last orbit if there is data. This will deal with
                        # orbits across file cleanly
                        self.load(-1)
                    else:
                        # No data, no previous data to account for. Move back
                        # to original data, do simple load of first orbit
                        self.inst.next()
                        self._get_basic_orbit(1)

                    # Check that this orbit should end on the current day
                    delta = true_date - self.inst.index[0]
                    if delta >= self.orbit_period:
                        # The orbit loaded isn't close enough to date to be the
                        # first orbit of the day, move forward
                        self.next()

                except StopIteration:
                    # Check if the first orbit is also the last orbit
                    self._get_basic_orbit(1)
                    self._report_current_orbit()

            elif orbit_num < self.num:
                # Load basic orbit data into data
                self._get_basic_orbit(orbit_num)
                self._report_current_orbit()

            else:
                # Gone too far
                self.inst.data = self.inst._null_data
                raise ValueError(' '.join(('Requested an orbit past total',
                                           'orbits for day')))
        else:
            logger.info(' '.join(('No data loaded in instrument object to',
                                  'determine orbits.')))
Example #19
0
    def _equa_breaks(self, orbit_index_period=24.0):
        """Determine where breaks in an equatorial satellite orbit occur.

        Looks for negative gradients in local time (or longitude) as well as
        breaks in UT.

        Parameters
        ----------
        orbit_index_period : float
            The change in value of supplied index parameter for a single orbit
            (default=24.0)

        Raises
        ------
        ValueError
            If the orbit_index attribute is not set to an appropriate value

        Note
        ----
        Sets `self._orbit_breaks` (sample indices where each orbit begins;
        the first element is always 0) and `self.num` (number of orbits).

        """

        if self.orbit_index is None:
            raise ValueError(' '.join(('Orbit properties must be defined at ',
                                       'pysat.Instrument object instantiation.',
                                       'See Instrument docs.')))
        else:
            try:
                self.inst[self.orbit_index]
            except KeyError as err:
                # Chain the KeyError so the original cause and traceback are
                # preserved for the caller (avoids "During handling of the
                # above exception..." confusion)
                raise ValueError(''.join((str(err), '\n',
                                          'Provided orbit index does not ',
                                          'exist in loaded data'))) from err

        # Get the difference in orbit index around the orbit
        lt_diff = self.inst[self.orbit_index]
        if not self.inst.pandas_format:
            # xarray-backed data; convert so .diff() and .iloc work below
            lt_diff = lt_diff.to_pandas()
        lt_diff = lt_diff.diff()

        # Get the typical (median) difference
        typical_lt_diff = np.nanmedian(lt_diff)
        logger.info(''.join(('typical lt diff ', str(typical_lt_diff))))

        # Get the Universal Time difference between data values. Assumes that
        # the time index is in UT.
        ut_vals = pds.Series(self.inst.index)
        ut_diff = ut_vals.diff()

        # Get the locations where the orbit index derivative is less than 0,
        # then do some sanity checks on these locations
        ind, = np.where((lt_diff < -0.2 * typical_lt_diff))
        if len(ind) > 0:
            # Append the total sample count so the distance from the final
            # break to the end of the data can be evaluated below
            ind = np.hstack((ind, np.array([len(self.inst[self.orbit_index])])))

            # Look at distance between breaks
            dist = ind[1:] - ind[0:-1]

            # Only keep orbit breaks with a distance greater than 1.  This check
            # is done to ensure robustness
            if len(ind) > 1:
                if min(dist) == 1:
                    logger.info(' '.join(('There are orbit breaks right next',
                                          'to each other')))
                ind = ind[:-1][dist > 1]

            # Check for large positive gradients around the break that would
            # suggest not a true orbit break, but rather bad orbit_index values
            new_ind = []
            for idx in ind:
                # Inspect an 11-sample window centered on the candidate break
                tidx, = np.where(lt_diff[(idx - 5):(idx + 6)]
                                 > 10 * typical_lt_diff)

                if len(tidx) != 0:
                    # There are large changes, this suggests a false alarm.
                    # Iterate over samples and check
                    for sub_tidx in tidx:
                        # Look at time change vs local time change
                        if(ut_diff[idx - 5:idx + 6].iloc[sub_tidx]
                           < lt_diff[idx - 5:idx + 6].iloc[sub_tidx]
                           / orbit_index_period * self.orbit_period):
                            # The change in UT is small compared to the change
                            # in the orbit index this is flagged as a false
                            # alarm, or dropped from consideration
                            logger.info(''.join(('Dropping found break ',
                                                 'as false positive.')))
                        else:
                            # The change in UT is significant, keep orbit break
                            new_ind.append(idx)
                            break
                else:
                    # There are no large positive gradients, current orbit
                    # break passes the first test
                    new_ind.append(idx)

            # Replace all breaks with those that are 'good'
            ind = np.array(new_ind)

        # Now, assemble some orbit breaks that are not triggered by changes in
        # the orbit index
        #
        # Check if there is a UT break that is larger than orbital period, AKA
        # a time gap
        ut_change_vs_period = (ut_diff > self.orbit_period)

        # Characterize ut change using orbital period
        norm_ut = ut_diff / self.orbit_period

        # Now, look for breaks because the length of time between samples is
        # too large, thus there is no break in slt/mlt/etc, lt_diff is small
        # but UT change is big
        norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values
                                               / orbit_index_period))

        # Indices when one or other flag is true
        ut_ind, = np.where(ut_change_vs_period
                           | (norm_ut_vs_norm_lt & (norm_ut > 0.95)))

        # Combine these UT determined orbit breaks with the orbit index orbit
        # breaks
        if len(ut_ind) > 0:
            ind = np.hstack((ind, ut_ind))
            ind = np.sort(ind)
            ind = np.unique(ind)
            logger.info('Time Gap at locations: {:}'.format(ut_ind))

        # Now that most problems in orbits should have been caught, look at
        # the time difference between orbits (not individual orbits)
        orbit_ut_diff = ut_vals[ind].diff()
        if not self.inst.pandas_format:
            orbit_lt_diff = self.inst[self.orbit_index].to_pandas()[ind].diff()
        else:
            orbit_lt_diff = self.inst[self.orbit_index][ind].diff()

        # Look for time gaps between partial orbits. The full orbital time
        # period is not required between end of one orbit and beginning of next
        # if first orbit is partial.  Also provides another general test of the
        # orbital breaks determined.
        idx, = np.where((orbit_ut_diff / self.orbit_period
                         - orbit_lt_diff.values / orbit_index_period) > 0.97)

        # Pull out breaks that pass the test, need to make sure the first one
        # is always included it gets dropped via the nature of diff
        if len(idx) > 0:
            if idx[0] != 0:
                idx = np.hstack((0, idx))
        else:
            idx = np.array([0])

        # Only keep the good indices
        if len(ind) > 0:
            ind = ind[idx]

            # Create an orbit break index, ensure first element is always 0
            if ind[0] != 0:
                ind = np.hstack((np.array([0]), ind))
        else:
            ind = np.array([0])

        # Set the index of orbit breaks and the number of orbits
        self._orbit_breaks = ind
        self.num = len(ind)