def test_sort_files():
    fgm_srvy_files = [
        'mms/data/mms1/fgm/srvy/l2/2017/11/mms1_fgm_srvy_l2_20171128_v5.115.0.cdf',
        'mms/data/mms1/fgm/srvy/l2/2017/11/mms1_fgm_srvy_l2_20171127_v5.113.0.cdf',
        'mms/data/mms1/fgm/srvy/l2/2017/11/mms1_fgm_srvy_l2_20171126_v5.113.0.cdf',
        'mms/data/mms1/fgm/srvy/l2/2017/11/mms1_fgm_srvy_l2_20171125_v5.114.0.cdf',
        'mms/data/mms1/fgm/srvy/l2/2017/11/mms1_fgm_srvy_l2_20171124_v5.113.0.cdf'
    ]
    fgm_brst_files = [
        'mms1_fgm_brst_l2_20171124231933_v5.113.0.cdf',
        'mms1_fgm_brst_l2_20171124020733_v5.113.0.cdf',
        'mms1_fgm_brst_l2_20171124020503_v5.113.0.cdf',
        'mms1_fgm_brst_l2_20171124020233_v5.113.0.cdf',
        'mms1_fgm_brst_l2_20171124020003_v5.113.0.cdf'
    ]
    edp_files = [
        'mms1_edp_brst_l2_dce_20171124014733_v3.0.0.cdf',
        'mms1_edp_brst_l2_dce_20171124014503_v3.0.0.cdf',
        'mms1_edp_brst_l2_dce_20171124014233_v3.0.0.cdf',
        'mms1_edp_brst_l2_dce_20171124014003_v3.0.0.cdf',
        'mms1_edp_brst_l2_dce_20171124013733_v3.0.0.cdf',
        'mms1_edp_brst_l2_dce_20171124013503_v3.0.0.cdf',
        'mms1_edp_brst_l2_dce_20171124013233_v3.0.0.cdf',
        'mms2_edp_brst_l2_dce_20171124012733_v3.0.0.cdf',
        'mms2_edp_brst_l2_dce_20171124012503_v3.0.0.cdf',
        'mms2_edp_brst_l2_dce_20171124012233_v3.0.0.cdf',
        'mms2_edp_brst_l2_dce_20171124012003_v3.0.0.cdf',
        'mms2_edp_brst_l2_dce_20171124011733_v3.0.0.cdf',
        'mms2_edp_fast_l2_dce_20171125_v3.0.0.cdf',
        'mms2_edp_fast_l2_dce_20171124_v3.0.0.cdf'
    ]

    # Files within a single group come back in ascending time order
    assert sdc.sort_files(fgm_srvy_files)[0] == fgm_srvy_files[::-1]
    assert sdc.sort_files(fgm_brst_files)[0] == fgm_brst_files[::-1]

    # Mixed file lists are grouped by spacecraft/instrument/mode/level/optdesc,
    # and each group is time-ordered
    edp_sorted = sdc.sort_files(edp_files)
    for file_type in edp_sorted:
        ref_parts = file_type[0].split('_')
        try:
            tref = datetime.datetime.strptime(ref_parts[5], '%Y%m%d%H%M%S')
        except ValueError:
            tref = datetime.datetime.strptime(ref_parts[5], '%Y%m%d')

        for file in file_type:
            fparts = file.split('_')
            try:
                ftime = datetime.datetime.strptime(fparts[5], '%Y%m%d%H%M%S')
            except ValueError:
                ftime = datetime.datetime.strptime(fparts[5], '%Y%m%d')

            assert (fparts[0:5] == ref_parts[0:5]) and (tref <= ftime)
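# A minimal, illustrative sketch of the grouping/ordering contract that the
# test above exercises. This is NOT the pymms implementation: it assumes the
# standard MMS file name pattern sc_instr_mode_level[_optdesc]_tstart_version.cdf
# and returns a plain list of groups rather than whatever container
# sort_files() actually uses.
def sort_files_sketch(files):
    import itertools
    import os

    def group_key(f):
        # Everything except the start time and version identifies the group
        return tuple(os.path.basename(f).split('_')[:-2])

    def time_key(f):
        # Start-time strings (YYYYMMDD or YYYYMMDDhhmmss) sort lexically
        return os.path.basename(f).split('_')[-2]

    files = sorted(files, key=lambda f: (group_key(f), time_key(f)))
    return [list(g) for _, g in itertools.groupby(files, key=group_key)]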
def load_data(sc, mode, start_date, end_date, level='l2', coords='gse'):
    """
    Load EDP electric field data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    mode : str
        Instrument mode: ('fast', 'brst'). If 'srvy' is given, it is
        automatically changed to 'fast'.
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    level : str
        Data quality level ('l2' by default).
    coords : str
        Coordinate system of the electric field ('gse' by default).

    Returns
    -------
    edp_data : `xarray.Dataset`
        EDP DC electric field data.
    """
    # Check the inputs
    check_spacecraft(sc)
    mode = check_mode(mode)

    # File and variable name parameters
    instr = 'edp'
    optdesc = 'dce'
    t_vname = '_'.join((sc, instr, 'epoch', mode, level))
    e_vname = '_'.join((sc, instr, optdesc, coords, mode, level))

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date,
                            end_date=end_date)
    edp_files = sdc.download_files()
    edp_files = api.sort_files(edp_files)[0]

    # Concatenate data along the records (time) dimension, which
    # should be equivalent to the DEPEND_0 variable name of the
    # electric field variable.
    edp_data = []
    for file in edp_files:
        edp_data.append(util.cdf_to_ds(file, e_vname))
    edp_data = xr.concat(edp_data, dim=edp_data[0][e_vname].dims[0])
    edp_data = edp_data.rename({t_vname: 'time', e_vname: 'E'})
    edp_data = edp_data.sel(time=slice(start_date, end_date))
    edp_data.attrs['files'] = edp_files

    return edp_data
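# A minimal usage sketch for the EDP loader above. The import path
# 'pymms.data.edp' is an assumption; point it at wherever this function
# actually lives in your installation.
def _example_load_edp():
    import datetime as dt
    from pymms.data import edp

    t0 = dt.datetime(2017, 11, 24, 1, 30)
    t1 = dt.datetime(2017, 11, 24, 2, 30)
    data = edp.load_data('mms1', 'brst', t0, t1)
    print(data['E'])   # DC electric field in GSE coordinates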
def load_amb_l1a(sc='mms1', mode='fast', level='l1a', optdesc='amb',
                 start_date=None, end_date=None, rename_vars=True,
                 **kwargs):
    """
    Load EDI data. CDF variable names are renamed to something easier
    to remember and use. Original CDF variable names are kept as an
    attribute "cdf_name" in each individual variable.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    mode : str
        Instrument mode: ('slow', 'srvy', 'fast', 'brst').
    level : str
        Data quality level ('l1a', 'l2pre', 'l2')
    optdesc : str
        Optional descriptor. Options are:
        {'efield', 'amb', 'amb-pm2', 'amb-alt-cc', 'amb-alt-oc',
         'amb-alt-oob', 'amb-perp-c', 'amb-perp-ob'}
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    rename_vars : bool
        If True (default), rename the standard MMS variable names
        to something more memorable and easier to use.
    \*\*kwargs : dict
        Any keyword accepted by *pymms.data.util.load_data*

    Returns
    -------
    data : `xarray.Dataset`
        EDI data.
    """
    # Download the data
    sdc = api.MrMMS_SDC_API(sc, 'edi', mode, level,
                            optdesc=optdesc,
                            start_date=start_date,
                            end_date=end_date,
                            **kwargs)

    files = sdc.download_files()
    try:
        files = api.sort_files(files)[0]
    except IndexError:
        raise IndexError('No files found: {0}'.format(sdc))

    # Read all of the data files
    data = []
    for file in files:
        data.append(util.cdf_to_ds(file, **kwargs))

    # Variables must be concatenated based on their DEPEND_0 variable
    rec_vnames = ['Epoch', 'epoch_angle', 'epoch_timetag']
    out_data = []
    for recname in rec_vnames:
        # Remove all data not associated with the current record name
        drop_vars = [varname
                     for varname in data[0]
                     if recname not in data[0][varname].coords]
        drop_coords = [coord
                       for coord in data[0].coords
                       if coord != recname]
        rec_data = [ds.drop(drop_vars + drop_coords) for ds in data]

        # Concatenate remaining variables together
        out = xr.concat(rec_data, dim=recname)

        # Select the desired time range
        out = out.sel(indexers={recname: slice(start_date, end_date)})

        # All datasets will be merged back together, so keep track of them
        out_data.append(out)

    # Combine the datasets back together
    data = xr.merge(out_data)

    # Rename data variables to something simpler
    if rename_vars:
        data = rename(data, sc, mode, level, optdesc)

    # Add data descriptors to attributes
    data.attrs['sc'] = sc
    data.attrs['instr'] = 'edi'
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc

    return data
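# A minimal usage sketch for the EDI ambient-mode loader above. The import
# path 'pymms.data.edi' is an assumption.
def _example_load_amb_l1a():
    import datetime as dt
    from pymms.data import edi

    data = edi.load_amb_l1a(sc='mms1', mode='brst', optdesc='amb-pm2',
                            start_date=dt.datetime(2017, 11, 24, 1, 0),
                            end_date=dt.datetime(2017, 11, 24, 2, 0))
    print(data.attrs['optdesc'], list(data.data_vars))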
def load_data(sc, mode, start_date, end_date,
              instr='fgm', level='l2', coords='gse', pd=False):
    """
    Load FGM magnetic field data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    mode : str
        Instrument mode: ('srvy', 'brst').
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    instr : str
        Fluxgate magnetometer instrument ('fgm' by default).
    level : str
        Data quality level ('l2' by default).
    coords : str
        Coordinate system of the magnetic field ('gse' by default).
    pd : bool
        If True, return a `pandas.DataFrame` instead of an
        `xarray.Dataset`.

    Returns
    -------
    fgm_data : `xarray.Dataset` or `pandas.DataFrame`
        Magnetic field data.
    """
    # Check the inputs
    check_spacecraft(sc)
    mode = check_mode(mode)
    check_level(level, instr=instr)
    check_coords(coords)

    # File and variable name parameters
    t_vname = 'Epoch'
    b_vname = '_'.join((sc, instr, 'b', coords, mode, level))
    b_labl_vname = '_'.join(('label', 'b', coords))

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            start_date=start_date, end_date=end_date)
    fgm_files = sdc.download_files()
    fgm_files = api.sort_files(fgm_files)[0]

    # Read the data from files
    if pd:
        fgm_data = util.cdf_to_df(fgm_files, b_vname)
        util.rename_df_cols(fgm_data, b_vname, ('Bx', 'By', 'Bz', '|B|'))
    else:
        # Concatenate data along the records (time) dimension, which
        # should be equivalent to the DEPEND_0 variable name of the
        # magnetic field variable.
        fgm_data = []
        for file in fgm_files:
            fgm_data.append(util.cdf_to_ds(file, b_vname))
        fgm_data = xr.concat(fgm_data, dim=fgm_data[0][b_vname].dims[0])
        fgm_data = fgm_data.rename({t_vname: 'time',
                                    b_vname: 'B',
                                    b_labl_vname: 'B_index'})
        fgm_data = fgm_data.assign_coords(B_index=['Bx', 'By', 'Bz', '|B|'])
        fgm_data = fgm_data.sel(time=slice(start_date, end_date))

    return fgm_data
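# A minimal usage sketch for the FGM loader above. The import path
# 'pymms.data.fgm' is an assumption.
def _example_load_fgm():
    import datetime as dt
    from pymms.data import fgm

    t0 = dt.datetime(2017, 11, 24)
    t1 = dt.datetime(2017, 11, 25)
    b = fgm.load_data('mms1', 'srvy', t0, t1)
    print(b['B'].sel(B_index='|B|'))   # field magnitude vs. time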
def load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
              optdesc=None, start_date=None, end_date=None,
              offline=False, record_dim='Epoch', team_site=False,
              data_type='science', **kwargs):
    """
    Load MMS data.

    Empty files are silently skipped. NoVariablesInFileError is raised
    only if all files in the time interval are empty.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    instr : str
        Instrument ID
    mode : str
        Instrument mode: ('slow', 'fast', 'srvy', 'brst').
    level : str
        Data quality level
    optdesc : str
        Optional descriptor for dataset
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    offline : bool
        If True, search only for local files
    record_dim : str
        Name of the record varying dimension. This is the dimension
        along which the data from different files will be concatenated.
        If *None*, the name of the leading dimension of the first data
        variable will be used.
    team_site : bool
        If True, search the password-protected team site
    data_type : str
        Type of data to download. ('science', 'hk', 'ancillary')
    \*\*kwargs : dict
        Keywords passed to *cdf_to_ds*

    Returns
    -------
    data : `xarray.Dataset` or list
        The requested data. If data from all files can be concatenated
        successfully, a Dataset is returned. If not, a list of Datasets
        is returned, where each dataset is the data from a single file.
    """
    if start_date is None:
        start_date = np.datetime64('2015-10-16T13:06:04')
    if end_date is None:
        end_date = np.datetime64('2015-10-16T13:07:20')

    site = 'public'
    if team_site:
        site = 'private'

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date,
                            end_date=end_date,
                            data_type=data_type,
                            offline=offline)

    # The data level parameter will automatically set the site keyword.
    # If the user specifies the site, set it after instantiation.
    sdc.site = site

    files = sdc.download_files()
    try:
        files = api.sort_files(files)[0]
    except IndexError:
        raise IndexError('No files found: {0}'.format(sdc))

    # Read all of the data files. Skip empty files unless all files are empty.
    data = []
    for file in files:
        try:
            data.append(cdf_to_ds(file, **kwargs))
        except NoVariablesInFileError:
            pass
    if len(data) == 0:
        raise NoVariablesInFileError(
            'All {0} files were empty.'.format(len(files))
        )

    # Determine the name of the record varying dimension. This should be
    # the value of the DEPEND_0 attribute of a data variable.
    if record_dim is None:
        varnames = [name for name in data[0].data_vars]
        rec_vname = data[0][varnames[0]].dims[0]
    else:
        rec_vname = record_dim

    # Notes:
    # 1. Concatenation can fail if, e.g., a variable does not have a
    #    coordinate assigned along a given dimension. Instead of crashing,
    #    return the list of datasets so that they can be corrected and
    #    concatenated externally.
    #
    # 2. If data variables in the dataset do not have the dimension
    #    identified by rec_vname, a new dimension is added. If the dataset
    #    is large, this can cause xarray/python to use all available RAM
    #    and crash. A fix would be to 1) find all DEPEND_0 variables,
    #    2) use the data_vars='minimal' option to concat for each one,
    #    3) combine the resulting datasets back together.
    #
    # 3. If there is only one dataset in the list and that dataset is
    #    empty, then xr.concat will return the dataset even if
    #    dim=rec_vname is not present.
    try:
        data = xr.concat(data, dim=rec_vname)
    except Exception:
        # See note 1 above: return the per-file datasets uncombined
        return data

    # cdf_to_ds loads all of the data from the file. Now we need to trim
    # to the time interval of interest.
    try:
        data = data.sel(indexers={rec_vname: slice(start_date, end_date)})
    except KeyError:
        warnings.warn('{0} is unordered; cannot slice.'.format(rec_vname))

    # Keep information about the data
    data.attrs['sc'] = sc
    data.attrs['instr'] = instr
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc
    data.attrs['files'] = files

    return data
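# A minimal usage sketch showing how a caller might handle the
# Dataset-or-list return contract described in the notes above. The import
# path 'pymms.data.util' is an assumption.
def _example_load_util():
    import numpy as np
    import xarray as xr
    from pymms.data import util

    data = util.load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
                          start_date=np.datetime64('2017-11-24T00:00:00'),
                          end_date=np.datetime64('2017-11-25T00:00:00'))

    if isinstance(data, xr.Dataset):
        print(data)   # concatenation succeeded
    else:
        # Concatenation failed; 'data' is a list of per-file Datasets that
        # can be repaired and concatenated manually.
        print('{0} per-file datasets returned'.format(len(data)))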
def plot_burst_selections(sc, start_date, end_date, figsize=(5.5, 7)):
    mode = 'srvy'
    level = 'l2'

    # FGM
    b_vname = '_'.join((sc, 'fgm', 'b', 'gse', mode, level))
    api = sdc.MrMMS_SDC_API(sc, 'fgm', mode, level,
                            start_date=start_date, end_date=end_date)
    files = api.download_files()
    files = sdc.sort_files(files)[0]
    fgm_data = metaarray.from_pycdf(files, b_vname,
                                    tstart=start_date, tend=end_date)

    # FPI DIS
    fpi_mode = 'fast'
    ni_vname = '_'.join((sc, 'dis', 'numberdensity', fpi_mode))
    espec_i_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', fpi_mode))
    api = sdc.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='dis-moms',
                            start_date=start_date, end_date=end_date)
    files = api.download_files()
    files = sdc.sort_files(files)[0]
    ni_data = metaarray.from_pycdf(files, ni_vname,
                                   tstart=start_date, tend=end_date)
    especi_data = metaarray.from_pycdf(files, espec_i_vname,
                                       tstart=start_date, tend=end_date)

    # FPI DES
    ne_vname = '_'.join((sc, 'des', 'numberdensity', fpi_mode))
    espec_e_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', fpi_mode))
    api = sdc.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='des-moms',
                            start_date=start_date, end_date=end_date)
    files = api.download_files()
    files = sdc.sort_files(files)[0]
    ne_data = metaarray.from_pycdf(files, ne_vname,
                                   tstart=start_date, tend=end_date)
    espece_data = metaarray.from_pycdf(files, espec_e_vname,
                                       tstart=start_date, tend=end_date)

    # Grab selections
    # abs_files = sdc.sitl_selections('abs_selections',
    #                                 start_date=start_date,
    #                                 end_date=end_date)
    # gls_files = sdc.sitl_selections('gls_selections', gls_type='mp-dl-unh',
    #                                 start_date=start_date,
    #                                 end_date=end_date)

    # Read the files
    abs_data = sel.read_csv((pathlib.Path(pymms.config['dropbox_root'])
                             / 'selections'
                             / 'abs_selections_all_20150901_000000.csv'),
                            start_time=start_date, stop_time=end_date)
    sitl_data = sel.read_csv((pathlib.Path(pymms.config['dropbox_root'])
                              / 'selections'
                              / 'sitl_selections_all_20150901_000000.csv'),
                             start_time=start_date, stop_time=end_date)
    gls_data = sel.read_csv((pathlib.Path(pymms.config['dropbox_root'])
                             / 'selections'
                             / 'gls_selections_all_20191016_000000.csv'),
                            start_time=start_date, stop_time=end_date)

    # SITL data time series
    t_abs = []
    x_abs = []
    for selection in abs_data:
        t_abs.extend([selection.start_time, selection.start_time,
                      selection.stop_time, selection.stop_time])
        x_abs.extend([0, selection.fom, selection.fom, 0])
    if len(abs_data) == 0:
        t_abs = [start_date, end_date]
        x_abs = [0, 0]
    abs = metaarray.MetaArray(x_abs, x0=metatime.MetaTime(t_abs))

    t_sitl = []
    x_sitl = []
    for selection in sitl_data:
        t_sitl.extend([selection.start_time, selection.start_time,
                       selection.stop_time, selection.stop_time])
        x_sitl.extend([0, selection.fom, selection.fom, 0])
    if len(sitl_data) == 0:
        t_sitl = [start_date, end_date]
        x_sitl = [0, 0]
    sitl = metaarray.MetaArray(x_sitl, x0=metatime.MetaTime(t_sitl))

    t_gls = []
    x_gls = []
    for selection in gls_data:
        t_gls.extend([selection.start_time, selection.start_time,
                      selection.stop_time, selection.stop_time])
        x_gls.extend([0, selection.fom, selection.fom, 0])
    if len(gls_data) == 0:
        t_gls = [start_date, end_date]
        x_gls = [0, 0]
    gls = metaarray.MetaArray(x_gls, x0=metatime.MetaTime(t_gls))

    # Set attributes to make the plot pretty
    especi_data.plot_title = sc.upper()
    especi_data.title = 'DEF'
    especi_data.x1.title = '$E_{ion}$\n(eV)'
    espece_data.title = 'DEF\n(keV/(cm^2 s sr keV))'
    espece_data.x1.title = '$E_{e-}$\n(eV)'
    fgm_data.title = 'B\n(nT)'
    fgm_data.label = ['Bx', 'By', 'Bz', '|B|']
    ni_data.title = 'N\n($cm^{-3}$)'
    ne_data.title = 'N\n($cm^{-3}$)'
    abs.title = 'ABS'
    gls.title = 'GLS'
    gls.lim = (0, 200)
    sitl.title = 'SITL'

    # Plot
    fig, axes = metabase.MetaCache.plot(
        (especi_data, espece_data, fgm_data, ni_data, abs, gls, sitl),
        figsize=figsize
    )
    plt.subplots_adjust(left=0.15, right=0.85, top=0.93)
    return fig, axes
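# A minimal usage sketch for the (metaarray-based) plotting function above.
# It assumes the selections CSV files referenced in the function exist under
# pymms.config['dropbox_root']/selections; the dates are illustrative.
def _example_plot_burst_selections():
    import datetime as dt

    fig, axes = plot_burst_selections('mms1',
                                      dt.datetime(2019, 10, 17),
                                      dt.datetime(2019, 10, 18))
    plt.show()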
def load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
              optdesc=None, start_date=None, end_date=None,
              offline=False, record_dim=None, team_site=False,
              **kwargs):
    """
    Load MMS data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    instr : str
        Instrument ID
    mode : str
        Instrument mode: ('slow', 'fast', 'srvy', 'brst').
    level : str
        Data quality level
    optdesc : str
        Optional descriptor for dataset
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    offline : bool
        If True, search only for local files
    record_dim : str
        Name of the record varying dimension. This is the dimension
        along which the data from different files will be concatenated.
        If *None*, the name of the leading dimension of the first data
        variable will be used.
    team_site : bool
        If True, search the password-protected team site
    \*\*kwargs : dict
        Keywords passed to *cdf_to_ds*

    Returns
    -------
    data : `xarray.Dataset` or list
        The requested data. If data from all files can be concatenated
        successfully, a Dataset is returned. If not, a list of Datasets
        is returned, where each dataset is the data from a single file.
    """
    if start_date is None:
        start_date = np.datetime64('2015-10-16T13:06:04')
    if end_date is None:
        end_date = np.datetime64('2015-10-16T13:07:20')

    site = 'public'
    if team_site:
        site = 'private'

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date,
                            end_date=end_date,
                            offline=offline)

    # The data level parameter will automatically set the site keyword.
    # If the user specifies the site, set it after instantiation.
    sdc.site = site

    files = sdc.download_files()
    try:
        files = api.sort_files(files)[0]
    except IndexError:
        raise IndexError('No files found: {0}'.format(sdc))

    # Read the data from each file
    data = []
    for file in files:
        data.append(cdf_to_ds(file, **kwargs))

    # Concatenate all datasets along the records (time) dimension, which
    # should be equivalent to the DEPEND_0 variable name of the data
    # variables. If not given, assume that the time dimension is the
    # leading dimension of the data variables.
    if record_dim is None:
        varnames = [name for name in data[0].data_vars]
        rec_vname = data[0].data_vars[varnames[0]].dims[0]
    else:
        rec_vname = record_dim

    # Notes:
    # 1. Concatenation can fail if, e.g., a variable does not have a
    #    coordinate assigned along a given dimension. Instead of crashing,
    #    return the list of datasets so that they can be corrected and
    #    concatenated externally.
    #
    # 2. If data variables in the dataset do not have the dimension
    #    identified by rec_vname, a new dimension is added. If the dataset
    #    is large, this can cause xarray/python to use all available RAM
    #    and crash. A fix would be to 1) find all DEPEND_0 variables,
    #    2) use the data_vars='minimal' option to concat for each one,
    #    3) combine the resulting datasets back together.
    try:
        data = xr.concat(data, dim=rec_vname)
    except Exception:
        return data

    # Keep information about the data
    data.attrs['sc'] = sc
    data.attrs['instr'] = instr
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc
    data.attrs['files'] = files

    return data
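# A minimal sketch of overriding the record dimension when the DEPEND_0
# variable is not named 'Epoch'. The record name 'epoch_timetag', the
# 'efield' descriptor, and the import path 'pymms.data.util' are
# illustrative assumptions.
def _example_record_dim():
    import numpy as np
    from pymms.data import util

    data = util.load_data(sc='mms1', instr='edi', mode='srvy', level='l2',
                          optdesc='efield',
                          record_dim='epoch_timetag',
                          start_date=np.datetime64('2017-11-24T00:00:00'),
                          end_date=np.datetime64('2017-11-24T06:00:00'))
    return data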
def load_data(sc='mms1', instr='fgm', mode='srvy', level='l2',
              optdesc=None, start_date=None, end_date=None,
              offline=False, record_dim='Epoch', team_site=False,
              **kwargs):
    """
    Load MMS data.

    Parameters
    ----------
    sc : str
        Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
    instr : str
        Instrument ID
    mode : str
        Instrument mode: ('slow', 'fast', 'srvy', 'brst').
    level : str
        Data quality level
    optdesc : str
        Optional descriptor for dataset
    start_date, end_date : `datetime.datetime`
        Start and end of the data interval.
    offline : bool
        If True, search only for local files
    record_dim : str
        Name of the record varying dimension. This is the dimension
        along which the data from different files will be concatenated.
        If *None*, the name of the leading dimension of the first data
        variable will be used.
    team_site : bool
        If True, search the password-protected team site
    \*\*kwargs : dict
        Keywords passed to *cdf_to_ds*

    Returns
    -------
    data : `xarray.Dataset`
        The requested data.
    """
    if start_date is None:
        start_date = np.datetime64('2015-10-16T13:06:04')
    if end_date is None:
        end_date = np.datetime64('2015-10-16T13:07:20')

    site = 'public'
    if team_site:
        site = 'private'

    # Download the data
    sdc = api.MrMMS_SDC_API(sc, instr, mode, level,
                            optdesc=optdesc,
                            start_date=start_date,
                            end_date=end_date,
                            offline=offline,
                            site=site)
    files = sdc.download_files()
    files = api.sort_files(files)[0]

    # Read the data from each file
    data = []
    for file in files:
        data.append(cdf_to_ds(file, **kwargs))

    # Concatenate all datasets along the time dimension. If not given,
    # assume that the time dimension is the leading dimension of the data
    # variables.
    if record_dim is None:
        varnames = [name for name in data[0].data_vars]
        rec_vname = data[0].data_vars[varnames[0]].dims[0]
    else:
        rec_vname = record_dim

    data = xr.concat(data, dim=rec_vname)
    data = data.sel({rec_vname: slice(start_date, end_date)})

    # Keep information about the data
    data.attrs['sc'] = sc
    data.attrs['instr'] = instr
    data.attrs['mode'] = mode
    data.attrs['level'] = level
    data.attrs['optdesc'] = optdesc
    data.attrs['files'] = files

    return data
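# A minimal sketch relying on the default time interval above (the
# 2015-10-16 13:06:04-13:07:20 burst event) when no dates are given. The
# import path and the 'des-moms' descriptor are illustrative assumptions.
def _example_default_interval():
    from pymms.data import util

    data = util.load_data(sc='mms1', instr='fpi', mode='brst', level='l2',
                          optdesc='des-moms')
    print(data.attrs['files'])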
def plot_burst_selections(sc, start_date, end_date, figsize=(5.5, 7)):
    mode = 'srvy'
    level = 'l2'

    # FGM
    b_vname = '_'.join((sc, 'fgm', 'b', 'gse', mode, level))
    mms = api.MrMMS_SDC_API(sc, 'fgm', mode, level,
                            start_date=start_date, end_date=end_date)
    files = mms.download_files()
    files = api.sort_files(files)[0]
    fgm_data = from_cdflib(files, b_vname, start_date, end_date)
    fgm_data[fgm_data['LABL_PTR_1']]['data'] = ['Bx', 'By', 'Bz', '|B|']

    # FPI DIS
    fpi_mode = 'fast'
    ni_vname = '_'.join((sc, 'dis', 'numberdensity', fpi_mode))
    espec_i_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', fpi_mode))
    mms = api.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='dis-moms',
                            start_date=start_date, end_date=end_date)
    files = mms.download_files()
    files = api.sort_files(files)[0]
    ni_data = from_cdflib(files, ni_vname, start_date, end_date)
    especi_data = from_cdflib(files, espec_i_vname, start_date, end_date)

    # FPI DES
    ne_vname = '_'.join((sc, 'des', 'numberdensity', fpi_mode))
    espec_e_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', fpi_mode))
    mms = api.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level,
                            optdesc='des-moms',
                            start_date=start_date, end_date=end_date)
    files = mms.download_files()
    files = api.sort_files(files)[0]
    ne_data = from_cdflib(files, ne_vname, start_date, end_date)
    espece_data = from_cdflib(files, espec_e_vname, start_date, end_date)

    # Grab selections
    abs_data = sel.selections('abs', start_date, end_date)
    sitl_data = sel.selections('sitl+back', start_date, end_date)
    gls_data = sel.selections('mp-dl-unh', start_date, end_date)

    # SITL data time series
    t_abs = []
    x_abs = []
    for selection in abs_data:
        t_abs.extend([selection.tstart, selection.tstart,
                      selection.tstop, selection.tstop])
        x_abs.extend([0, selection.fom, selection.fom, 0])
    if len(abs_data) == 0:
        t_abs = [start_date, end_date]
        x_abs = [0, 0]
    abs = {'data': x_abs, 'DEPEND_0': 't', 't': {'data': t_abs}}

    t_sitl = []
    x_sitl = []
    for selection in sitl_data:
        t_sitl.extend([selection.tstart, selection.tstart,
                       selection.tstop, selection.tstop])
        x_sitl.extend([0, selection.fom, selection.fom, 0])
    if len(sitl_data) == 0:
        t_sitl = [start_date, end_date]
        x_sitl = [0, 0]
    sitl = {'data': x_sitl, 'DEPEND_0': 't', 't': {'data': t_sitl}}

    t_gls = []
    x_gls = []
    for selection in gls_data:
        t_gls.extend([selection.tstart, selection.tstart,
                      selection.tstop, selection.tstop])
        x_gls.extend([0, selection.fom, selection.fom, 0])
    if len(gls_data) == 0:
        t_gls = [start_date, end_date]
        x_gls = [0, 0]
    gls = {'data': x_gls, 'DEPEND_0': 't', 't': {'data': t_gls}}

    # Set up the plot
    nrows = 7
    ncols = 1
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols,
                             figsize=figsize, squeeze=False)
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)

    # Ion energy spectrogram
    plot_2D(especi_data, axes[0, 0])
    axes[0, 0].set_title(sc.upper())
    fig.axes[-1].set_label('DEF')
    axes[0, 0].set_ylabel('$E_{ion}$\n(eV)')
    axes[0, 0].set_xticks([])
    axes[0, 0].set_xlabel('')

    # Electron energy spectrogram
    plot_2D(espece_data, axes[1, 0])
    fig.axes[-1].set_label('DEF\nLog_{10}(keV/(cm^2 s sr keV))')
    axes[1, 0].set_ylabel('$E_{e-}$\n(eV)')
    axes[1, 0].set_xticks([])
    axes[1, 0].set_xlabel('')
    axes[1, 0].set_title('')

    # Magnetic field
    plot_1D(fgm_data, axes[2, 0])
    axes[2, 0].set_ylabel('B\n(nT)')
    axes[2, 0].set_xticks([])
    axes[2, 0].set_xlabel('')
    axes[2, 0].set_title('')

    # Ion number density
    plot_1D(ni_data, axes[3, 0])
    axes[3, 0].set_ylabel('$N_{i}$\n($cm^{-3}$)')
    axes[3, 0].set_xticks([])
    axes[3, 0].set_xlabel('')
    axes[3, 0].set_title('')

    # ABS selections
    plot_1D(abs, axes[4, 0])
    axes[4, 0].set_ylabel('ABS')
    axes[4, 0].set_xticks([])
    axes[4, 0].set_xlabel('')
    axes[4, 0].set_title('')

    # GLS selections
    plot_1D(gls, axes[5, 0])
    axes[5, 0].set_ylabel('GLS')
    axes[5, 0].set_ylim(0, 200)
    axes[5, 0].set_xticks([])
    axes[5, 0].set_xlabel('')
    axes[5, 0].set_title('')

    # SITL selections
    plot_1D(sitl, axes[6, 0])
    axes[6, 0].set_ylabel('SITL')
    axes[6, 0].set_title('')
    axes[6, 0].xaxis.set_major_locator(locator)
    axes[6, 0].xaxis.set_major_formatter(formatter)
    for tick in axes[6, 0].get_xticklabels():
        tick.set_rotation(45)

    # Set a common time range
    plt.setp(axes, xlim=mdates.date2num([start_date, end_date]))
    plt.subplots_adjust(left=0.15, right=0.85, top=0.93)

    return fig, axes
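# A minimal usage sketch for the (cdflib-based) plotting function above,
# saving the figure to disk. The dates and output file name are illustrative.
def _example_plot_and_save():
    import datetime as dt

    fig, axes = plot_burst_selections('mms1',
                                      dt.datetime(2020, 5, 30),
                                      dt.datetime(2020, 5, 31))
    fig.savefig('burst_selections_mms1.png', dpi=150)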