def download_ql_data(t0, t1):
    # Expand the interval to whole days so that daily files are captured
    start_date = dt.datetime.combine(t0.date(), dt.time(0, 0, 0))
    end_date = dt.datetime.combine(t1.date() + dt.timedelta(days=1),
                                   dt.time(0, 0, 0))

    # AFG quick-look magnetic field
    api = sdc.MrMMS_SDC_API('mms1', 'afg', 'srvy', 'ql',
                            start_date=start_date, end_date=end_date)
    afg_files = api.download()

    # EDP DC electric field
    api.instr = 'edp'
    api.mode = 'fast'
    api.optdesc = 'dce'
    edp_files = api.download()

    # FPI electron and ion quick-look products
    api.instr = 'fpi'
    api.optdesc = 'des'
    des_files = api.download()

    api.optdesc = 'dis'
    dis_files = api.download()

    return afg_files, edp_files, des_files, dis_files
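
# Usage sketch (interval illustrative):
t0 = dt.datetime(2019, 12, 16, 18, 0, 0)
t1 = dt.datetime(2019, 12, 17, 8, 0, 0)
afg_files, edp_files, des_files, dis_files = download_ql_data(t0, t1)
print('{0} AFG quick-look files downloaded'.format(len(afg_files)))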
def read_mec_position(sc, start_date, end_date, optdesc='epht89d'):
    mode = 'srvy'
    level = 'l2'

    # MEC variable names
    tepoch = epochs.CDFepoch()
    t_vname = 'Epoch'
    r_vname = '_'.join((sc, 'mec', 'r', 'gse'))

    api = sdc.MrMMS_SDC_API(sc, 'mec', mode, level,
                            optdesc=optdesc,
                            start_date=start_date, end_date=end_date)
    files = api.download_files()

    # Read the time and position variables from each file
    t_mec = np.empty(0, dtype='datetime64[ns]')
    r_mec = np.empty((0, 3), dtype='float')
    for file in files:
        cdf = cdfread.CDF(file)
        time = cdf.varget(t_vname)
        t_mec = np.append(t_mec, tepoch.to_datetime(time, to_np=True), 0)
        r_mec = np.append(r_mec, cdf.varget(r_vname), 0)

    # Filter based on time interval
    dt_start = dt.datetime.strptime(start_date, '%Y-%m-%dT%H:%M:%S.%f')
    dt_end = dt.datetime.strptime(end_date, '%Y-%m-%dT%H:%M:%S.%f')
    istart = np.searchsorted(t_mec, dt_start)
    iend = np.searchsorted(t_mec, dt_end)

    return t_mec[istart:iend], r_mec[istart:iend, :]
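
# Usage sketch: note that this function expects ISO-formatted strings with
# fractional seconds ('%Y-%m-%dT%H:%M:%S.%f'), not datetime objects.
t_mec, r_mec = read_mec_position('mms1',
                                 '2019-12-16T18:00:00.000000',
                                 '2019-12-17T08:00:00.000000')
print(t_mec.shape, r_mec.shape)   # (N,) epochs and (N, 3) GSE positions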
def load_data(sc, mode, start_date, end_date, level='l2', coords='gse'):
    """
    Load EDP DC electric field (DCE) data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    mode : str
        Instrument mode: ('fast', 'brst'). If 'srvy' is given, it is
        automatically changed to 'fast'.
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.

    Returns
    -------
    edp_data : `xarray.Dataset`
        DC electric field data.
    """

    # Check the inputs
    check_spacecraft(sc)
    mode = check_mode(mode)

    # File and variable name parameters
    instr = 'edp'
    optdesc = 'dce'
    t_vname = '_'.join((sc, instr, 'epoch', mode, level))
    e_vname = '_'.join((sc, instr, optdesc, coords, mode, level))

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date, end_date=end_date)
    edp_files = sdc.download_files()
    edp_files = api.sort_files(edp_files)[0]

    # Concatenate data along the records (time) dimension, which
    # should be equivalent to the DEPEND_0 variable name of the
    # electric field variable.
    edp_data = []
    for file in edp_files:
        edp_data.append(util.cdf_to_ds(file, e_vname))
    edp_data = xr.concat(edp_data, dim=edp_data[0][e_vname].dims[0])

    # Rename variables and trim to the requested time range
    edp_data = edp_data.rename({t_vname: 'time', e_vname: 'E'})
    edp_data = edp_data.sel(time=slice(start_date, end_date))
    edp_data.attrs['files'] = edp_files

    return edp_data
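
# Usage sketch, assuming the module-level aliases used in the body
# (api = pymms.sdc.mrmms_sdc_api, util = pymms.data.util, xr = xarray)
# are imported (dates illustrative):
import datetime as dt

edp_data = load_data('mms1', 'fast',
                     dt.datetime(2017, 11, 24, 0, 0, 0),
                     dt.datetime(2017, 11, 24, 6, 0, 0))
print(edp_data['E'].shape, edp_data.attrs['files'])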
def __init__(self, sc, level, start, end,
             include_selections=True, include_partials=True,
             verbose=False):
    self.sc = sc
    self.level = level
    self.verbose = verbose

    # An integer start time is interpreted as an orbit number; use the
    # science ROI (SROI) of that orbit to define the time interval
    if isinstance(start, int):
        sroi = api.mission_events('sroi', start, end, sc=sc)
        self.start_date = sroi['tstart'][0]
        self.end_date = sroi['tend'][-1]
    else:
        self.start_date = validate_date(start)
        self.end_date = validate_date(end)

    if include_partials and not include_selections:
        raise ValueError(
            'include_selections must be True in order to include '
            'partial selections in the combined dataframe.'
        )
    self.include_selections = include_selections
    self.include_partials = include_partials

    # SITL data is available in the fast-survey region of the orbit.
    # For many instruments, fast- and slow-survey data are combined
    # into a single survey product.
    self.mode = 'srvy'

    # This class works only for 'sitl' and 'l2' data
    if level not in ('sitl', 'l2'):
        raise ValueError('Level must be either "sitl" or "l2".')

    # Create an interface to the SDC
    self.mms = api.MrMMS_SDC_API(sc=sc, mode=self.mode,
                                 start_date=self.start_date,
                                 end_date=self.end_date)

    # Ensure that the log-in information is there.
    # - If the config file was already set, this step is redundant.
    self.mms._data_root = pymms.config['data_root']
    if self.level == 'sitl':
        self.mms._session.auth = (pymms.config['username'],
                                  pymms.config['password'])
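
# Instantiation sketch. The class name `SITLData` is hypothetical (the
# __init__ above belongs to whatever class wraps it). An integer start is
# interpreted as an orbit number and resolved through the science ROI;
# strings and datetimes go through validate_date().
by_orbit = SITLData('mms1', 'l2', 1051, 1052)   # orbit-number interval
by_date = SITLData('mms1', 'sitl',
                   '2019-10-17T00:00:00', '2019-10-18T00:00:00',
                   include_partials=False)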
def load_amb_l1a(sc='mms1', mode='fast', level='l1a',
                 optdesc='amb', start_date=None, end_date=None,
                 rename_vars=True, **kwargs):
    """
    Load EDI data.

    CDF variable names are renamed to something easier to remember and
    use. Original CDF variable names are kept as an attribute "cdf_name"
    in each individual variable.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    mode : str
        Instrument mode: ('slow', 'srvy', 'fast', 'brst').
    level : str
        Data quality level ('l1a', 'l2pre', 'l2')
    optdesc : str
        Optional descriptor. Options are:
        {'efield' | 'amb' | 'amb-pm2' | 'amb-alt-cc' | 'amb-alt-oc' |
         'amb-alt-oob' | 'amb-perp-c' | 'amb-perp-ob'}
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    rename_vars : bool
        If True (default), rename the standard MMS variable names
        to something more memorable and easier to use.
    \*\*kwargs : dict
        Any keyword accepted by *pymms.data.util.load_data*

    Returns
    -------
    dist : `xarray.Dataset`
        EDI data.
    """
    # Download the data
    sdc = api.MrMMS_SDC_API(sc, 'edi', mode, level,
                            optdesc=optdesc,
                            start_date=start_date, end_date=end_date,
                            **kwargs)

    files = sdc.download_files()
    try:
        files = api.sort_files(files)[0]
    except IndexError:
        raise IndexError('No files found: {0}'.format(sdc))

    # Read all of the data files
    data = []
    for file in files:
        data.append(util.cdf_to_ds(file, **kwargs))

    # Variables must be concatenated based on their DEPEND_0 variable
    rec_vnames = ['Epoch', 'epoch_angle', 'epoch_timetag']
    out_data = []
    for recname in rec_vnames:
        # Remove all data not associated with the current record name
        drop_vars = [varname
                     for varname in data[0]
                     if recname not in data[0][varname].coords]
        drop_coords = [coord
                       for coord in data[0].coords
                       if coord != recname]
        rec_data = [ds.drop(drop_vars + drop_coords) for ds in data]

        # Concatenate remaining variables together
        out = xr.concat(rec_data, dim=recname)

        # Select the desired time range
        out = out.sel(indexers={recname: slice(start_date, end_date)})

        # All datasets will be merged back together, so keep track of them
        out_data.append(out)

    # Combine the datasets back together
    data = xr.merge(out_data)

    # Rename data variables to something simpler
    if rename_vars:
        data = rename(data, sc, mode, level, optdesc)

    # Add data descriptors to attributes
    data.attrs['sc'] = sc
    data.attrs['instr'] = 'edi'
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc

    return data
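
# Usage sketch for the L1A ambient-mode loader (interval illustrative):
import datetime as dt

edi_amb = load_amb_l1a(sc='mms1', mode='brst', optdesc='amb-pm2',
                       start_date=dt.datetime(2016, 10, 22, 12, 58, 0),
                       end_date=dt.datetime(2016, 10, 22, 13, 4, 0))
print(edi_amb.attrs['optdesc'], list(edi_amb.data_vars)[:5])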
# Define the event
sc = 'mms1'
mode = 'srvy'
level = 'l2'

# EDI Data
edi_data = edi.load_data(sc, mode, optdesc='efield',
                         start_date=t0, end_date=t1)
edi_data = edi_data.rename({'Epoch': 'time'})

# FGM Data
fgm_data = fgm.load_data(sc, mode, t0, t1, instr='fgm', level=level)

# MEC Data
r_gse_vname = '_'.join((sc, 'mec', 'r', 'gse'))
r_lbl_vname = '_'.join((sc, 'mec', 'r', 'gse', 'label'))
v_gse_vname = '_'.join((sc, 'mec', 'v', 'gse'))
v_lbl_vname = '_'.join((sc, 'mec', 'v', 'gse', 'label'))

sdc = api.MrMMS_SDC_API(sc, 'mec', mode, level,
                        optdesc='epht89d',
                        start_date=t0, end_date=t1)
files = sdc.download()
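
# A minimal sketch of reading the downloaded MEC CDFs with cdflib,
# mirroring read_mec_position() elsewhere in this document; `files` is
# the list returned by sdc.download() above.
import numpy as np
from cdflib import cdfread

r_mec = np.concatenate([cdfread.CDF(f).varget(r_gse_vname)
                        for f in sorted(files)], axis=0)
v_mec = np.concatenate([cdfread.CDF(f).varget(v_gse_vname)
                        for f in sorted(files)], axis=0)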
import pytest
import os
import datetime

from pymms.sdc import mrmms_sdc_api as sdc

sc = 'mms1'
instr = 'fgm'
mode = 'srvy'
level = 'l2'
start_date = datetime.datetime(2017, 11, 24, 0, 0, 0)
end_date = datetime.datetime(2017, 11, 24, 23, 59, 59)

api = sdc.MrMMS_SDC_API(sc, instr, mode, level,
                        start_date=start_date, end_date=end_date)

# Sample file names
f_burst = '_'.join((sc, instr, 'brst', level,
                    start_date.strftime('%Y%m%d%H%M%S'),
                    'v1.0.0')) + '.cdf'
f_survey = '_'.join((sc, instr, 'srvy', level,
                     start_date.strftime('%Y%m%d'),
                     'v1.0.0')) + '.cdf'
f_selections = '_'.join(('gls_selections', 'mp-dl-unh',
                         start_date.strftime('%Y-%m-%d-%H-%M-%S'))) + '.sav'


def test_init_attributes():
    """Check that the input values set the correct attributes."""
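
# A sketch (not part of the original suite) of the attribute-mutation
# pattern used throughout this document: instr, mode, and optdesc can be
# reassigned on an existing MrMMS_SDC_API instance to re-target a query.
# Restoring optdesc to None afterwards is an assumption about its default.
def test_attribute_mutation():
    api.instr = 'edp'
    api.mode = 'fast'
    api.optdesc = 'dce'
    assert (api.instr, api.mode, api.optdesc) == ('edp', 'fast', 'dce')
    # Restore module-level defaults so later tests see a clean instance
    api.instr, api.mode, api.optdesc = instr, mode, None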
def load_data(sc, mode, start_date, end_date,
              instr='fgm', level='l2', coords='gse', pd=False):
    """
    Load FGM magnetic field data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    mode : str
        Instrument mode: ('srvy', 'brst').
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.

    Returns
    -------
    fgm_data : `xarray.Dataset` or `pandas.DataFrame`
        Magnetic field data.
    """

    # Check the inputs
    check_spacecraft(sc)
    mode = check_mode(mode)
    check_level(level, instr=instr)
    check_coords(coords)

    # File and variable name parameters
    t_vname = 'Epoch'
    b_vname = '_'.join((sc, instr, 'b', coords, mode, level))
    b_labl_vname = '_'.join(('label', 'b', coords))

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            start_date=start_date, end_date=end_date)
    fgm_files = sdc.download_files()
    fgm_files = api.sort_files(fgm_files)[0]

    # Read the data from files
    if pd:
        fgm_data = util.cdf_to_df(fgm_files, b_vname)
        util.rename_df_cols(fgm_data, b_vname, ('Bx', 'By', 'Bz', '|B|'))
    else:
        # Concatenate data along the records (time) dimension, which
        # should be equivalent to the DEPEND_0 variable name of the
        # magnetic field variable.
        fgm_data = []
        for file in fgm_files:
            fgm_data.append(util.cdf_to_ds(file, b_vname))
        fgm_data = xr.concat(fgm_data, dim=fgm_data[0][b_vname].dims[0])
        fgm_data = fgm_data.rename({t_vname: 'time',
                                    b_vname: 'B',
                                    b_labl_vname: 'B_index'})
        fgm_data = fgm_data.assign_coords(B_index=['Bx', 'By', 'Bz', '|B|'])
        fgm_data = fgm_data.sel(time=slice(start_date, end_date))

    return fgm_data
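
# Usage sketch: the B_index coordinate assigned above makes individual
# field components selectable by name (dates illustrative).
import datetime as dt

fgm_data = load_data('mms1', 'srvy',
                     dt.datetime(2017, 11, 24), dt.datetime(2017, 11, 25))
bx = fgm_data['B'].sel(B_index='Bx')      # x-component in GSE
bmag = fgm_data['B'].sel(B_index='|B|')   # field magnitude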
def load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
              optdesc=None, start_date=None, end_date=None,
              offline=False, record_dim='Epoch', team_site=False,
              data_type='science', **kwargs):
    """
    Load MMS data.

    Empty files are silently skipped. NoVariablesInFileError is raised
    only if all files in the time interval are empty.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    instr : str
        Instrument ID
    mode : str
        Instrument mode: ('slow', 'fast', 'srvy', 'brst').
    optdesc : str
        Optional descriptor for dataset
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    offline : bool
        If True, search only for local files
    record_dim : str
        Name of the record varying dimension. This is the dimension
        along which the data from different files will be concatenated.
        If *None*, the name of the leading dimension of the first data
        variable will be used.
    team_site : bool
        If True, search the password-protected team site
    data_type : str
        Type of data to download. ('science', 'hk', 'ancillary')
    \*\*kwargs : dict
        Keywords passed to *cdf_to_ds*

    Returns
    -------
    data : `xarray.Dataset` or list
        The requested data. If data from all files can be concatenated
        successfully, a Dataset is returned. If not, a list of Datasets
        is returned, where each dataset is the data from a single file.
    """
    if start_date is None:
        start_date = np.datetime64('2015-10-16T13:06:04')
    if end_date is None:
        end_date = np.datetime64('2015-10-16T13:07:20')

    site = 'public'
    if team_site:
        site = 'private'

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date, end_date=end_date,
                            data_type=data_type,
                            offline=offline)

    # The data level parameter will automatically set the site keyword.
    # If the user specifies the site, set it after instantiation.
    sdc.site = site

    files = sdc.download_files()
    try:
        files = api.sort_files(files)[0]
    except IndexError:
        raise IndexError('No files found: {0}'.format(sdc))

    # Read all of the data files. Skip empty files unless all files
    # are empty.
    data = []
    for file in files:
        try:
            data.append(cdf_to_ds(file, **kwargs))
        except NoVariablesInFileError:
            pass
    if len(data) == 0:
        raise NoVariablesInFileError(
            'All {0} files were empty.'.format(len(files))
        )

    # Determine the name of the record varying dimension. This should be
    # the value of the DEPEND_0 attribute of a data variable.
    if record_dim is None:
        varnames = [name for name in data[0].data_vars]
        rec_vname = data[0][varnames[0]].dims[0]
    else:
        rec_vname = record_dim

    # Notes:
    # 1. Concatenation can fail if, e.g., a variable does not have a
    #    coordinate assigned along a given dimension. Instead of crashing,
    #    return the list of datasets so that they can be corrected and
    #    concatenated externally.
    #
    # 2. If data variables in the dataset do not have the dimension
    #    identified by rec_vname, a new dimension is added. If the dataset
    #    is large, this can cause xarray/python to use all available RAM
    #    and crash. A fix would be to 1) find all DEPEND_0 variables,
    #    2) use the data_vars='minimal' option to concat for each one,
    #    3) combine the resulting datasets back together.
    #
    # 3. If there is only one dataset in the list and that dataset is
    #    empty, then xr.concat will return the dataset even if
    #    dim=rec_vname is not present.
    try:
        data = xr.concat(data, dim=rec_vname)
    except Exception:
        return data

    # cdf_to_ds loads all of the data from the file. Now we need to trim
    # to the time interval of interest.
    try:
        data = data.sel(indexers={rec_vname: slice(start_date, end_date)})
    except KeyError:
        warnings.warn('{0} is unordered; cannot slice.'.format(rec_vname))

    # Keep information about the data
    data.attrs['sc'] = sc
    data.attrs['instr'] = instr
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc
    data.attrs['files'] = files

    return data
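
# Per note 1 above, load_data() returns a list of per-file Datasets when
# concatenation fails. A sketch of recovering externally with the
# data_vars='minimal' strategy from note 2 (optdesc value illustrative):
result = load_data(sc='mms1', instr='fpi', mode='brst', level='l2',
                   optdesc='des-moms')
if isinstance(result, list):
    # Fix coordinate/dimension mismatches here as needed, then retry
    result = xr.concat(result, dim='Epoch', data_vars='minimal')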
def plot_burst_selections(sc, start_date, end_date, figsize=(5.5, 7)):
    mode = 'srvy'
    level = 'l2'

    # FGM
    b_vname = '_'.join((sc, 'fgm', 'b', 'gse', mode, level))
    api = sdc.MrMMS_SDC_API(sc, 'fgm', mode, level,
                            start_date=start_date, end_date=end_date)
    files = api.download_files()
    files = sdc.sort_files(files)[0]
    fgm_data = metaarray.from_pycdf(files, b_vname,
                                    tstart=start_date, tend=end_date)

    # FPI DIS
    fpi_mode = 'fast'
    ni_vname = '_'.join((sc, 'dis', 'numberdensity', fpi_mode))
    espec_i_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', fpi_mode))
    api = sdc.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='dis-moms',
                            start_date=start_date, end_date=end_date)
    files = api.download_files()
    files = sdc.sort_files(files)[0]
    ni_data = metaarray.from_pycdf(files, ni_vname,
                                   tstart=start_date, tend=end_date)
    especi_data = metaarray.from_pycdf(files, espec_i_vname,
                                       tstart=start_date, tend=end_date)

    # FPI DES
    ne_vname = '_'.join((sc, 'des', 'numberdensity', fpi_mode))
    espec_e_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', fpi_mode))
    api = sdc.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='des-moms',
                            start_date=start_date, end_date=end_date)
    files = api.download_files()
    files = sdc.sort_files(files)[0]
    ne_data = metaarray.from_pycdf(files, ne_vname,
                                   tstart=start_date, tend=end_date)
    espece_data = metaarray.from_pycdf(files, espec_e_vname,
                                       tstart=start_date, tend=end_date)

    # Grab selections
    # abs_files = sdc.sitl_selections('abs_selections',
    #                                 start_date=start_date,
    #                                 end_date=end_date)
    # gls_files = sdc.sitl_selections('gls_selections',
    #                                 gls_type='mp-dl-unh',
    #                                 start_date=start_date,
    #                                 end_date=end_date)

    # Read the files
    abs_data = sel.read_csv(
        (pathlib.Path(pymms.config['dropbox_root']) / 'selections'
         / 'abs_selections_all_20150901_000000.csv'),
        start_time=start_date, stop_time=end_date)
    sitl_data = sel.read_csv(
        (pathlib.Path(pymms.config['dropbox_root']) / 'selections'
         / 'sitl_selections_all_20150901_000000.csv'),
        start_time=start_date, stop_time=end_date)
    gls_data = sel.read_csv(
        (pathlib.Path(pymms.config['dropbox_root']) / 'selections'
         / 'gls_selections_all_20191016_000000.csv'),
        start_time=start_date, stop_time=end_date)

    # SITL data time series
    t_abs = []
    x_abs = []
    for selection in abs_data:
        t_abs.extend([selection.start_time, selection.start_time,
                      selection.stop_time, selection.stop_time])
        x_abs.extend([0, selection.fom, selection.fom, 0])
    if len(abs_data) == 0:
        t_abs = [start_date, end_date]
        x_abs = [0, 0]
    abs = metaarray.MetaArray(x_abs, x0=metatime.MetaTime(t_abs))

    t_sitl = []
    x_sitl = []
    for selection in sitl_data:
        t_sitl.extend([selection.start_time, selection.start_time,
                       selection.stop_time, selection.stop_time])
        x_sitl.extend([0, selection.fom, selection.fom, 0])
    if len(sitl_data) == 0:
        t_sitl = [start_date, end_date]
        x_sitl = [0, 0]
    sitl = metaarray.MetaArray(x_sitl, x0=metatime.MetaTime(t_sitl))

    t_gls = []
    x_gls = []
    for selection in gls_data:
        t_gls.extend([selection.start_time, selection.start_time,
                      selection.stop_time, selection.stop_time])
        x_gls.extend([0, selection.fom, selection.fom, 0])
    if len(gls_data) == 0:
        t_gls = [start_date, end_date]
        x_gls = [0, 0]
    gls = metaarray.MetaArray(x_gls, x0=metatime.MetaTime(t_gls))

    # Set attributes to make the plot pretty
    especi_data.plot_title = sc.upper()
    especi_data.title = 'DEF'
    especi_data.x1.title = '$E_{ion}$\n(eV)'
    espece_data.title = 'DEF\n(keV/(cm^2 s sr keV))'
    espece_data.x1.title = '$E_{e-}$\n(eV)'
    fgm_data.title = 'B\n(nT)'
    fgm_data.label = ['Bx', 'By', 'Bz', '|B|']
    ni_data.title = 'N\n($cm^{-3}$)'
    ne_data.title = 'N\n($cm^{-3}$)'
    abs.title = 'ABS'
    gls.title = 'GLS'
    gls.lim = (0, 200)
    sitl.title = 'SITL'

    # Plot
    fig, axes = metabase.MetaCache.plot(
        (especi_data, espece_data, fgm_data, ni_data, abs, gls, sitl),
        figsize=figsize
    )
    plt.subplots_adjust(left=0.15, right=0.85, top=0.93)

    return fig, axes
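
# Usage sketch (dates illustrative): render the seven-panel overview of
# ABS, GLS, and SITL selections and save it to disk.
import datetime as dt

fig, axes = plot_burst_selections('mms1',
                                  dt.datetime(2019, 10, 17),
                                  dt.datetime(2019, 10, 18))
fig.savefig('burst_selections_mms1.png', dpi=200)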
def load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
              optdesc=None, start_date=None, end_date=None,
              offline=False, record_dim=None, team_site=False,
              **kwargs):
    """
    Load MMS data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    instr : str
        Instrument ID
    mode : str
        Instrument mode: ('slow', 'fast', 'srvy', 'brst').
    optdesc : str
        Optional descriptor for dataset
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    offline : bool
        If True, search only for local files
    record_dim : str
        Name of the record varying dimension. This is the dimension
        along which the data from different files will be concatenated.
        If *None*, the name of the leading dimension of the first data
        variable will be used.
    team_site : bool
        If True, search the password-protected team site
    \*\*kwargs : dict
        Keywords passed to *cdf_to_ds*

    Returns
    -------
    data : `xarray.Dataset` or list
        The requested data. If data from all files can be concatenated
        successfully, a Dataset is returned. If not, a list of Datasets
        is returned, where each dataset is the data from a single file.
    """
    if start_date is None:
        start_date = np.datetime64('2015-10-16T13:06:04')
    if end_date is None:
        end_date = np.datetime64('2015-10-16T13:07:20')

    site = 'public'
    if team_site:
        site = 'private'

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date, end_date=end_date,
                            offline=offline)

    # The data level parameter will automatically set the site keyword.
    # If the user specifies the site, set it after instantiation.
    sdc.site = site

    files = sdc.download_files()
    try:
        files = api.sort_files(files)[0]
    except IndexError:
        raise IndexError('No files found: {0}'.format(sdc))

    # Read each file into its own dataset
    data = []
    for file in files:
        data.append(cdf_to_ds(file, **kwargs))

    # Concatenate all datasets along the records (time) dimension, which
    # should be equivalent to the DEPEND_0 variable name of the data
    # variables. If not given, assume that the time dimension is the
    # leading dimension of the data variables.
    if record_dim is None:
        varnames = [name for name in data[0].data_vars]
        rec_vname = data[0].data_vars[varnames[0]].dims[0]
    else:
        rec_vname = record_dim

    # Notes:
    # 1. Concatenation can fail if, e.g., a variable does not have a
    #    coordinate assigned along a given dimension. Instead of crashing,
    #    return the list of datasets so that they can be corrected and
    #    concatenated externally.
    #
    # 2. If data variables in the dataset do not have the dimension
    #    identified by rec_vname, a new dimension is added. If the dataset
    #    is large, this can cause xarray/python to use all available RAM
    #    and crash. A fix would be to 1) find all DEPEND_0 variables,
    #    2) use the data_vars='minimal' option to concat for each one,
    #    3) combine the resulting datasets back together.
    try:
        data = xr.concat(data, dim=rec_vname)
    except Exception:
        return data

    # Keep information about the data
    data.attrs['sc'] = sc
    data.attrs['instr'] = instr
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc
    data.attrs['files'] = files

    return data
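
# Unlike the variant above that trims after concatenating, this version
# returns the full contents of every matching file. A sketch of slicing
# manually when a Dataset (not a list) comes back:
t0 = np.datetime64('2015-10-16T13:06:04')
t1 = np.datetime64('2015-10-16T13:07:20')
data = load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
                 start_date=t0, end_date=t1)
if not isinstance(data, list):
    data = data.sel(Epoch=slice(t0, t1))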
def load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
              optdesc=None, start_date=None, end_date=None,
              offline=False, record_dim='Epoch', team_site=False,
              **kwargs):
    """
    Load MMS data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    instr : str
        Instrument ID
    mode : str
        Instrument mode: ('slow', 'fast', 'srvy', 'brst').
    optdesc : str
        Optional descriptor for dataset
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    offline : bool
        If True, search only for local files
    record_dim : str
        Name of the record varying dimension. This is the dimension
        along which the data from different files will be concatenated.
        If *None*, the name of the leading dimension of the first data
        variable will be used.
    team_site : bool
        If True, search the password-protected team site
    \*\*kwargs : dict
        Keywords passed to *cdf_to_ds*

    Returns
    -------
    data : `xarray.Dataset`
        The requested data.
    """
    if start_date is None:
        start_date = np.datetime64('2015-10-16T13:06:04')
    if end_date is None:
        end_date = np.datetime64('2015-10-16T13:07:20')

    site = 'public'
    if team_site:
        site = 'private'

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date, end_date=end_date,
                            offline=offline, site=site)
    files = sdc.download_files()
    files = api.sort_files(files)[0]

    # Read each file into its own dataset
    data = []
    for file in files:
        data.append(cdf_to_ds(file, **kwargs))

    # Concatenate all datasets along the records (time) dimension. If not
    # given, assume that the time dimension is the leading dimension of
    # the data variables.
    if record_dim is None:
        varnames = [name for name in data[0].data_vars]
        rec_vname = data[0].data_vars[varnames[0]].dims[0]
    else:
        rec_vname = record_dim
    data = xr.concat(data, dim=rec_vname)
    data = data.sel({rec_vname: slice(start_date, end_date)})

    # Keep information about the data
    data.attrs['sc'] = sc
    data.attrs['instr'] = instr
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc
    data.attrs['files'] = files

    return data
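
# Usage sketch: with record_dim=None, the leading dimension of the first
# data variable is used for concatenation, which helps for products whose
# epoch variable is not named 'Epoch' (optdesc value illustrative):
data = load_data(sc='mms1', instr='fpi', mode='brst', level='l2',
                 optdesc='dis-moms', record_dim=None,
                 start_date=np.datetime64('2015-10-16T13:06:04'),
                 end_date=np.datetime64('2015-10-16T13:07:20'))
print(data.attrs['files'])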
def plot_burst_selections(sc, start_date, end_date, figsize=(5.5, 7)):
    mode = 'srvy'
    level = 'l2'

    # FGM
    b_vname = '_'.join((sc, 'fgm', 'b', 'gse', mode, level))
    mms = api.MrMMS_SDC_API(sc, 'fgm', mode, level,
                            start_date=start_date, end_date=end_date)
    files = mms.download_files()
    files = api.sort_files(files)[0]
    fgm_data = from_cdflib(files, b_vname, start_date, end_date)
    fgm_data[fgm_data['LABL_PTR_1']]['data'] = ['Bx', 'By', 'Bz', '|B|']

    # FPI DIS
    fpi_mode = 'fast'
    ni_vname = '_'.join((sc, 'dis', 'numberdensity', fpi_mode))
    espec_i_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', fpi_mode))
    mms = api.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='dis-moms',
                            start_date=start_date, end_date=end_date)
    files = mms.download_files()
    files = api.sort_files(files)[0]
    ni_data = from_cdflib(files, ni_vname, start_date, end_date)
    especi_data = from_cdflib(files, espec_i_vname, start_date, end_date)

    # FPI DES
    ne_vname = '_'.join((sc, 'des', 'numberdensity', fpi_mode))
    espec_e_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', fpi_mode))
    mms = api.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='des-moms',
                            start_date=start_date, end_date=end_date)
    files = mms.download_files()
    files = api.sort_files(files)[0]
    ne_data = from_cdflib(files, ne_vname, start_date, end_date)
    espece_data = from_cdflib(files, espec_e_vname, start_date, end_date)

    # Grab selections
    abs_data = sel.selections('abs', start_date, end_date)
    sitl_data = sel.selections('sitl+back', start_date, end_date)
    gls_data = sel.selections('mp-dl-unh', start_date, end_date)

    # SITL data time series
    t_abs = []
    x_abs = []
    for selection in abs_data:
        t_abs.extend([selection.tstart, selection.tstart,
                      selection.tstop, selection.tstop])
        x_abs.extend([0, selection.fom, selection.fom, 0])
    if len(abs_data) == 0:
        t_abs = [start_date, end_date]
        x_abs = [0, 0]
    abs = {'data': x_abs, 'DEPEND_0': 't', 't': {'data': t_abs}}

    t_sitl = []
    x_sitl = []
    for selection in sitl_data:
        t_sitl.extend([selection.tstart, selection.tstart,
                       selection.tstop, selection.tstop])
        x_sitl.extend([0, selection.fom, selection.fom, 0])
    if len(sitl_data) == 0:
        t_sitl = [start_date, end_date]
        x_sitl = [0, 0]
    sitl = {'data': x_sitl, 'DEPEND_0': 't', 't': {'data': t_sitl}}

    t_gls = []
    x_gls = []
    for selection in gls_data:
        t_gls.extend([selection.tstart, selection.tstart,
                      selection.tstop, selection.tstop])
        x_gls.extend([0, selection.fom, selection.fom, 0])
    if len(gls_data) == 0:
        t_gls = [start_date, end_date]
        x_gls = [0, 0]
    gls = {'data': x_gls, 'DEPEND_0': 't', 't': {'data': t_gls}}

    # Set up the plot
    nrows = 7
    ncols = 1
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
                             figsize=figsize, squeeze=False)
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)

    # Ion energy spectrogram
    plot_2D(especi_data, axes[0, 0])
    axes[0, 0].set_title(sc.upper())
    fig.axes[-1].set_label('DEF')
    axes[0, 0].set_ylabel('$E_{ion}$\n(eV)')
    axes[0, 0].set_xticks([])
    axes[0, 0].set_xlabel('')

    # Electron energy spectrogram
    plot_2D(espece_data, axes[1, 0])
    fig.axes[-1].set_label('DEF\nLog_{10}(keV/(cm^2 s sr keV))')
    axes[1, 0].set_ylabel('$E_{e-}$\n(eV)')
    axes[1, 0].set_xticks([])
    axes[1, 0].set_xlabel('')
    axes[1, 0].set_title('')

    # Magnetic field
    plot_1D(fgm_data, axes[2, 0])
    axes[2, 0].set_ylabel('B\n(nT)')
    axes[2, 0].set_xticks([])
    axes[2, 0].set_xlabel('')
    axes[2, 0].set_title('')

    # Ion density
    plot_1D(ni_data, axes[3, 0])
    axes[3, 0].set_ylabel('$N_{i}$\n($cm^{-3}$)')
    axes[3, 0].set_xticks([])
    axes[3, 0].set_xlabel('')
    axes[3, 0].set_title('')

    # ABS selections
    plot_1D(abs, axes[4, 0])
    axes[4, 0].set_ylabel('ABS')
    axes[4, 0].set_xticks([])
    axes[4, 0].set_xlabel('')
    axes[4, 0].set_title('')

    # GLS selections
    plot_1D(gls, axes[5, 0])
    axes[5, 0].set_ylabel('GLS')
    axes[5, 0].set_ylim(0, 200)
    axes[5, 0].set_xticks([])
    axes[5, 0].set_xlabel('')
    axes[5, 0].set_title('')

    # SITL selections
    plot_1D(sitl, axes[6, 0])
    axes[6, 0].set_ylabel('SITL')
    axes[6, 0].set_title('')
    axes[6, 0].xaxis.set_major_locator(locator)
    axes[6, 0].xaxis.set_major_formatter(formatter)
    for tick in axes[6, 0].get_xticklabels():
        tick.set_rotation(45)

    # Set a common time range
    plt.setp(axes, xlim=mdates.date2num([start_date, end_date]))
    plt.subplots_adjust(left=0.15, right=0.85, top=0.93)

    return fig, axes
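
# The three selection products above build identical stepped FOM-vs-time
# series. A sketch of factoring that pattern into a helper (the function
# name is hypothetical, not part of pymms):
def selections_to_series(selections, start_date, end_date):
    """Convert burst selections into a stepped FOM time series dict."""
    t, x = [], []
    for s in selections:
        t.extend([s.tstart, s.tstart, s.tstop, s.tstop])
        x.extend([0, s.fom, s.fom, 0])
    if len(selections) == 0:
        t = [start_date, end_date]
        x = [0, 0]
    return {'data': x, 'DEPEND_0': 't', 't': {'data': t}}

# e.g.: gls = selections_to_series(gls_data, start_date, end_date)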