def dailynames(directory='', trange=None, res=24 * 3600., hour_res=False,
               file_format='%Y%m%d', prefix='', suffix=''):
    if trange is None:
        print('No trange specified')
        return

    if hour_res:
        res = 3600.
        file_format = '%Y%m%d%H'

    # allows the user to pass in trange as list of datetime objects
    if type(trange[0]) == datetime and type(trange[1]) == datetime:
        trange = [time_string(trange[0].timestamp()),
                  time_string(trange[1].timestamp())]

    tr = [trange[0], trange[1]]

    if isinstance(trange[0], str):
        tr[0] = time_double(trange[0])
    if isinstance(trange[1], str):
        tr[1] = time_double(trange[1])

    # Davin's magic heisted from file_dailynames in IDL
    mmtr = [np.floor(tr[0] / res), np.ceil(tr[1] / res)]

    if mmtr[1] - mmtr[0] < 1:
        n = 1
    else:
        n = int(mmtr[1] - mmtr[0])

    times = [(float(num) + mmtr[0]) * res for num in range(n)]

    dates = []
    files = []

    for time in times:
        if time_string(time, fmt=file_format) not in dates:
            dates.append(time_string(time, fmt=file_format))

    for date in dates:
        files.append(directory + prefix + date + suffix)

    return files
def mms_read_feeps_sector_masks_csv(trange):
    """
    This function returns the FEEPS sectors to mask due to sunlight contamination

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']

    Returns:
        Hash table containing the sectors to mask for each spacecraft and sensor ID
    """
    masks = {}

    dates = [1447200000.0000000,  # 11/11/2015
             1468022400.0000000,  # 7/9/2016
             1477612800.0000000,  # 10/28/2016
             1496188800.0000000,  # 5/31/2017
             1506988800.0000000,  # 10/3/2017
             1538697600.0000000]  # 10/5/2018

    # find the file closest to the start time
    nearest_date = dates[(np.abs(np.array(dates) - time_double(trange[0]))).argmin()]

    for mms_sc in [1, 2, 3, 4]:
        csv_file = os.sep.join([os.path.dirname(os.path.abspath(__file__)), 'sun',
                                'MMS' + str(mms_sc) + '_FEEPS_ContaminatedSectors_' +
                                time_string(nearest_date, fmt='%Y%m%d') + '.csv'])

        csv_file = open(csv_file, 'r')
        csv_reader = csv.reader(csv_file)
        csv_data = []

        for line in csv_reader:
            csv_data.append([float(l) for l in line])

        csv_file.close()

        csv_data = np.array(csv_data)

        for i in range(0, 12):
            mask_vals = []
            for val_idx in range(0, len(csv_data[:, i])):
                if csv_data[val_idx, i] == 1:
                    mask_vals.append(val_idx)
            masks['mms' + str(mms_sc) + 'imaskt' + str(i + 1)] = mask_vals

        for i in range(0, 12):
            mask_vals = []
            for val_idx in range(0, len(csv_data[:, i + 12])):
                if csv_data[val_idx, i + 12] == 1:
                    mask_vals.append(val_idx)
            masks['mms' + str(mms_sc) + 'imaskb' + str(i + 1)] = mask_vals

    return masks
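# Illustrative usage sketch (not part of the original module): the returned dictionary is
# keyed by 'mms<probe>imaskt<eye>' (top) and 'mms<probe>imaskb<eye>' (bottom), so a caller
# might look up the contaminated sectors for a single eye like this:
masks = mms_read_feeps_sector_masks_csv(['2017-12-01', '2017-12-02'])
bad_sectors = masks.get('mms1imaskt3', [])  # top eye 3 on MMS1
print(bad_sectors)                          # list of sector indices to set to NaN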
def dailynames(directory='', trange=None, res=24 * 3600., hour_res=False,
               file_format='%Y%m%d', dir_format='', YYYY_MM_DIR=False,
               prefix='', suffix=''):
    if trange is None:
        print('No trange specified')
        return

    if hour_res:
        res = 3600.
        file_format = '%Y%m%d%H'

    if YYYY_MM_DIR:
        dir_format = '%Y/%m/'

    tr = [time_double(trange[0]), time_double(trange[1])]

    # Davin's magic heisted from file_dailynames in IDL
    mmtr = [np.floor(tr[0] / res), np.ceil(tr[1] / res)]

    if mmtr[1] - mmtr[0] < 1:
        n = 1
    else:
        n = int(mmtr[1] - mmtr[0])

    times = [(float(num) + mmtr[0]) * res for num in range(n)]

    dates = []
    files = []

    for time in times:
        if time_string(time, fmt=file_format) not in dates:
            dates.append(time_string(time, fmt=file_format))

    for date in dates:
        files.append(directory + prefix + date + suffix)

    return files
def mms_load_data(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy', level='l2',
                  instrument='fgm', datatype='', varformat=None, prefix='', suffix='',
                  get_support_data=False, time_clip=False, no_update=False,
                  center_measurement=False, available=False, notplot=False,
                  latest_version=False, major_version=False, min_version=None,
                  cdf_version=None, spdf=False):
    """
    This function loads MMS data into pyTplot variables

    This function is not meant to be called directly. Please see the individual
    load routines for documentation and use.
    """
    if not isinstance(probe, list):
        probe = [probe]
    if not isinstance(data_rate, list):
        data_rate = [data_rate]
    if not isinstance(level, list):
        level = [level]
    if not isinstance(datatype, list):
        datatype = [datatype]

    probe = [str(p) for p in probe]

    # allows the user to pass in trange as list of datetime objects
    if type(trange[0]) == datetime and type(trange[1]) == datetime:
        trange = [time_string(trange[0].timestamp()),
                  time_string(trange[1].timestamp())]

    # allows the user to pass in trange as a list of floats (unix times)
    if isinstance(trange[0], float):
        trange[0] = time_string(trange[0])
    if isinstance(trange[1], float):
        trange[1] = time_string(trange[1])

    start_date = parse(trange[0]).strftime('%Y-%m-%d')  # need to request full day, then parse out later
    end_date = parse(time_string(time_double(trange[1]) - 0.1)).strftime('%Y-%m-%d-%H-%M-%S')  # -1 second to avoid getting data for the next day

    download_only = CONFIG['download_only']

    no_download = False
    if no_update or CONFIG['no_download']:
        no_download = True

    if spdf:
        return mms_load_data_spdf(trange=trange, probe=probe, data_rate=data_rate, level=level,
                                  instrument=instrument, datatype=datatype, varformat=varformat,
                                  suffix=suffix, get_support_data=get_support_data,
                                  time_clip=time_clip, no_update=no_update,
                                  center_measurement=center_measurement, notplot=notplot,
                                  latest_version=latest_version, major_version=major_version,
                                  min_version=min_version, cdf_version=cdf_version)

    user = None
    if not no_download:
        sdc_session, user = mms_login_lasp()

    out_files = []
    available_files = []

    for prb in probe:
        for drate in data_rate:
            for lvl in level:
                for dtype in datatype:
                    if user is None:
                        url = 'https://lasp.colorado.edu/mms/sdc/public/files/api/v1/file_info/science?start_date=' + start_date + '&end_date=' + end_date + '&sc_id=mms' + prb + '&instrument_id=' + instrument + '&data_rate_mode=' + drate + '&data_level=' + lvl
                    else:
                        url = 'https://lasp.colorado.edu/mms/sdc/sitl/files/api/v1/file_info/science?start_date=' + start_date + '&end_date=' + end_date + '&sc_id=mms' + prb + '&instrument_id=' + instrument + '&data_rate_mode=' + drate + '&data_level=' + lvl

                    if dtype != '':
                        url = url + '&descriptor=' + dtype

                    if CONFIG['debug_mode']:
                        logging.info('Fetching: ' + url)

                    if no_download == False:
                        # query list of available files
                        try:
                            with warnings.catch_warnings():
                                warnings.simplefilter("ignore", category=ResourceWarning)
                                http_json = sdc_session.get(url, verify=True).json()

                            if CONFIG['debug_mode']:
                                logging.info('Filtering the results down to your trange')

                            files_in_interval = mms_files_in_interval(http_json['files'], trange)

                            if available:
                                for file in files_in_interval:
                                    logging.info(file['file_name'] + ' (' + str(np.round(file['file_size'] / (1024. * 1024), decimals=1)) + ' MB)')
                                    available_files.append(file['file_name'])
                                continue

                            for file in files_in_interval:
                                file_date = parse(file['timetag'])

                                if dtype == '':
                                    out_dir = os.sep.join([CONFIG['local_data_dir'], 'mms' + prb, instrument, drate, lvl, file_date.strftime('%Y'), file_date.strftime('%m')])
                                else:
                                    out_dir = os.sep.join([CONFIG['local_data_dir'], 'mms' + prb, instrument, drate, lvl, dtype, file_date.strftime('%Y'), file_date.strftime('%m')])

                                if drate.lower() == 'brst':
                                    out_dir = os.sep.join([out_dir, file_date.strftime('%d')])

                                out_file = os.sep.join([out_dir, file['file_name']])

                                if CONFIG['debug_mode']:
                                    logging.info('File: ' + file['file_name'] + ' / ' + file['timetag'])

                                if os.path.exists(out_file) and str(os.stat(out_file).st_size) == str(file['file_size']):
                                    if not download_only:
                                        logging.info('Loading ' + out_file)
                                    out_files.append(out_file)
                                    continue

                                if user is None:
                                    download_url = 'https://lasp.colorado.edu/mms/sdc/public/files/api/v1/download/science?file=' + file['file_name']
                                else:
                                    download_url = 'https://lasp.colorado.edu/mms/sdc/sitl/files/api/v1/download/science?file=' + file['file_name']

                                logging.info('Downloading ' + file['file_name'] + ' to ' + out_dir)

                                with warnings.catch_warnings():
                                    warnings.simplefilter("ignore", category=ResourceWarning)
                                    fsrc = sdc_session.get(download_url, stream=True, verify=True)

                                ftmp = NamedTemporaryFile(delete=False)

                                with open(ftmp.name, 'wb') as f:
                                    copyfileobj(fsrc.raw, f)

                                if not os.path.exists(out_dir):
                                    os.makedirs(out_dir)

                                # if the download was successful, copy to data directory
                                copy(ftmp.name, out_file)
                                out_files.append(out_file)

                                fsrc.close()
                                ftmp.close()
                        except requests.exceptions.ConnectionError:
                            # No/bad internet connection; try loading the files locally
                            logging.error('No internet connection!')

                    if out_files == []:
                        if not download_only:
                            logging.info('Searching for local files...')
                        out_files = mms_get_local_files(prb, instrument, drate, lvl, dtype, trange)

                        if out_files == [] and CONFIG['mirror_data_dir'] != None:
                            # check for network mirror; note: network mirrors are assumed to be read-only
                            # and we always copy the files from the mirror to the local data directory
                            # before trying to load into tplot variables
                            logging.info('No local files found; checking network mirror...')
                            out_files = mms_get_local_files(prb, instrument, drate, lvl, dtype, trange, mirror=True)

    if not no_download:
        sdc_session.close()

    if available:
        return available_files

    if not download_only:
        out_files = sorted(out_files)

        filtered_out_files = mms_file_filter(out_files, latest_version=latest_version,
                                             major_version=major_version, min_version=min_version,
                                             version=cdf_version)

        if filtered_out_files == []:
            logging.info('No matching CDF versions found.')
            return

        new_variables = cdf_to_tplot(filtered_out_files, varformat=varformat, merge=True,
                                     get_support_data=get_support_data, prefix=prefix,
                                     suffix=suffix, center_measurement=center_measurement,
                                     notplot=notplot)

        if notplot:
            return new_variables

        if new_variables == []:
            logging.warning('No data loaded.')
            return

        if time_clip:
            for new_var in new_variables:
                tclip(new_var, trange[0], trange[1], suffix='')

        return new_variables
    else:
        return out_files
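# A minimal usage sketch (our example, not part of the module): as the docstring notes,
# mms_load_data is reached through the instrument-specific wrappers rather than called
# directly; for example, pyspedas.mms.fgm() passes its keywords through to this routine.
import pyspedas
fgm_vars = pyspedas.mms.fgm(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy')
print(fgm_vars)  # list of the tplot variables that were loaded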
# to return the actual data values, use get_data
from pytplot import get_data

times, fgm_data = get_data('mms4_fgm_b_gsm_brst_l2')

# times are unix time (seconds since 1 January 1970)
print(times[0])

# FGM data include the magnitude
fgm_data[0]

# you can convert the unix time to a string with time_string
from pyspedas import time_string
print(time_string(times[0]))

# and convert back to unix time using time_double
from pyspedas import time_double
print(time_double('2015-10-16 13:06:00.00451'))

# create new tplot variables with store_data
from pytplot import store_data

# save the B-field vector
store_data('b_vector', data={'x': times, 'y': fgm_data[:, 0:3]})

# save the B-field magnitude
store_data('b_mag', data={'x': times, 'y': fgm_data[:, 3]})

# the keywords are very flexible, e.g.,
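# An illustrative continuation (our examples, not the original notebook's): the time
# routines accept single values, lists, and partial time strings, and store_data can
# also build a pseudo-variable from existing tplot variables:
print(time_double(['2015-10-16 13:06', '2015-10-16 13:07:02.5']))
store_data('b_combined', data=['b_vector', 'b_mag'])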
def test_time_string(self):
    self.assertTrue(time_string(1450181243.767) == '2015-12-15 12:07:23.767000')
    self.assertTrue(time_string([1450181243.767, 1450181263.767]) ==
                    ['2015-12-15 12:07:23.767000', '2015-12-15 12:07:43.767000'])
def dailynames(directory='', trange=None, res=24 * 3600., hour_res=False,
               file_format='%Y%m%d', prefix='', suffix=''):
    '''
    Creates a list of file names using a time range, resolution and file format

    Based on Davin Larson's file_dailynames in IDL SPEDAS

    Parameters:
        directory: str
            String containing the directory for the generated file names

        trange: list of str, list of datetime or list of floats
            Two-element list containing the start and end times for the file names

        res: float
            File name resolution in seconds (default: 24*3600., i.e., daily)

        file_format: str
            Format of the file names using strftime directives; for reference:
            https://strftime.org (default: %Y%m%d, i.e., daily)

        prefix: str
            file name prefix

        suffix: str
            file name suffix

    Returns:
        List containing filenames
    '''
    if trange is None:
        print('No trange specified')
        return

    if hour_res:
        res = 3600.
        file_format = '%Y%m%d%H'

    # allows the user to pass in trange as list of datetime objects
    if type(trange[0]) == datetime and type(trange[1]) == datetime:
        trange = [time_string(trange[0].timestamp()),
                  time_string(trange[1].timestamp())]

    tr = [trange[0], trange[1]]

    if isinstance(trange[0], str):
        tr[0] = time_double(trange[0])
    if isinstance(trange[1], str):
        tr[1] = time_double(trange[1])

    # Davin's magic heisted from file_dailynames in IDL
    mmtr = [np.floor(tr[0] / res), np.ceil(tr[1] / res)]

    if mmtr[1] - mmtr[0] < 1:
        n = 1
    else:
        n = int(mmtr[1] - mmtr[0])

    times = [(float(num) + mmtr[0]) * res for num in range(n)]

    dates = []
    files = []

    for time in times:
        if time_string(time, fmt=file_format) not in dates:
            dates.append(time_string(time, fmt=file_format))

    for date in dates:
        files.append(directory + prefix + date + suffix)

    return files
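# Illustrative usage sketch (the directory, prefix and suffix below are made-up example
# values). Note that an end time falling exactly on a resolution boundary is treated as
# exclusive, so a two-day span yields two daily names:
print(dailynames(directory='data/', trange=['2020-01-01', '2020-01-03'],
                 prefix='mission_', suffix='.cdf'))
# ['data/mission_20200101.cdf', 'data/mission_20200102.cdf']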
def mms_load_sroi_segments(trange=None, probe=1, suffix=''):
    """
    This function loads the Science Region of Interest (SRoI) segment intervals

    Parameters:
        trange: list of str
            time range of interest [starttime, endtime] with the format
            ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']

        probe: str or int
            Spacecraft probe # (default: 1)

        suffix: str
            Suffix to append to the end of the tplot variables

    Returns:
        Tuple containing (start_times, end_times)
    """
    if not isinstance(probe, str):
        probe = str(probe)

    if trange is None:
        logging.error('Error; no trange specified.')
        return None

    tr = time_double(trange)

    unix_start, unix_end = get_mms_srois(
        start_time=time_string(tr[0] - 2 * 86400.0, fmt='%Y-%m-%d %H:%M:%S'),
        end_time=time_string(tr[1] + 2 * 86400.0, fmt='%Y-%m-%d %H:%M:%S'),
        sc_id='mms' + str(probe))

    if len(unix_start) == 0:
        return ([], [])

    times_in_range = (unix_start >= tr[0] - 2 * 86400.0) & (unix_start <= tr[1] + 2 * 86400.0)

    unix_start = unix_start[times_in_range]
    unix_end = unix_end[times_in_range]

    start_out = []
    end_out = []
    bar_x = []
    bar_y = []

    for start_time, end_time in zip(unix_start, unix_end):
        if end_time >= tr[0] and start_time <= tr[1]:
            bar_x.extend([start_time, start_time, end_time, end_time])
            bar_y.extend([np.nan, 0., 0., np.nan])
            start_out.append(start_time)
            end_out.append(end_time)

    vars_created = store_data('mms' + probe + '_bss_sroi' + suffix, data={'x': bar_x, 'y': bar_y})

    if not vars_created:
        logging.error('Error creating SRoI segment intervals tplot variable')
        return None

    options('mms' + probe + '_bss_sroi' + suffix, 'panel_size', 0.09)
    options('mms' + probe + '_bss_sroi' + suffix, 'thick', 2)
    options('mms' + probe + '_bss_sroi' + suffix, 'Color', 'green')
    options('mms' + probe + '_bss_sroi' + suffix, 'border', False)
    options('mms' + probe + '_bss_sroi' + suffix, 'yrange', [-0.001, 0.001])
    options('mms' + probe + '_bss_sroi' + suffix, 'legend_names', ['Fast'])
    options('mms' + probe + '_bss_sroi' + suffix, 'ytitle', '')

    return (start_out, end_out)
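# Illustrative usage sketch (assumes network access to the SDC): fetch the SRoI intervals
# for MMS1 over one day and print the first interval as time strings.
from pyspedas import time_string
starts, ends = mms_load_sroi_segments(trange=['2019-10-16', '2019-10-17'], probe=1)
if starts:
    print(time_string(starts[0]), '-', time_string(ends[0]))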
def mms_feeps_remove_bad_data(probe='1', data_rate='srvy', datatype='electron', level='l2', suffix=''):
    data_rate_level = data_rate + '_' + level

    # electrons first, remove bad eyes
    #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 1. BAD EYES ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # First, here is a list of the EYES that are bad, we need to make sure these
    # data are not usable (i.e., make all of the counts/rate/flux data from these eyes NAN).
    # These are for all modes, burst and survey:
    bad_data_table = {}
    bad_data_table['2017-10-01'] = {}
    bad_data_table['2017-10-01']['mms1'] = {'top': [1], 'bottom': [1, 11]}
    bad_data_table['2017-10-01']['mms2'] = {'top': [5, 7, 12], 'bottom': [7]}
    bad_data_table['2017-10-01']['mms3'] = {'top': [2, 12], 'bottom': [2, 5, 11]}
    bad_data_table['2017-10-01']['mms4'] = {'top': [1, 2, 7], 'bottom': [2, 4, 5, 10, 11]}

    bad_data_table['2018-10-01'] = {}
    bad_data_table['2018-10-01']['mms1'] = {'top': [1], 'bottom': [1, 11]}
    bad_data_table['2018-10-01']['mms2'] = {'top': [7, 12], 'bottom': [2, 12]}
    bad_data_table['2018-10-01']['mms3'] = {'top': [1, 2], 'bottom': [5, 11]}
    bad_data_table['2018-10-01']['mms4'] = {'top': [1, 7], 'bottom': [4, 11]}

    dates = np.asarray(time_double(list(bad_data_table.keys())))

    closest_table_tm = (np.abs(dates - dt.datetime.now().timestamp())).argmin()
    closest_table = time_string(dates[closest_table_tm], '%Y-%m-%d')

    bad_data = bad_data_table[closest_table]['mms' + probe]

    bad_vars = []

    # top electrons
    for bad_var in bad_data['top']:
        if bad_var in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_counts_sensorid_' + str(bad_var) + suffix))

    # bottom electrons
    for bad_var in bad_data['bottom']:
        if bad_var in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_counts_sensorid_' + str(bad_var) + suffix))

    # top ions
    for bad_var in bad_data['top']:
        if bad_var not in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_counts_sensorid_' + str(bad_var) + suffix))

    # bottom ions
    for bad_var in bad_data['bottom']:
        if bad_var not in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_counts_sensorid_' + str(bad_var) + suffix))

    for bad_var in bad_vars:
        if bad_var == []:
            continue
        bad_var_data = pytplot.get_data(bad_var[0])
        if bad_var_data is not None:
            times, data = bad_var_data
            energies = pytplot.data_quants[bad_var[0]].spec_bins.values
            # check if the energy table contains all NaNs
            if np.isnan(np.sum(energies)):
                continue
            data[:] = np.nan
            pytplot.store_data(bad_var[0], data={'x': times, 'y': data, 'v': energies.reshape(energies.size)})

    #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 2. BAD LOWEST E-CHANNELS ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Next, these eyes have bad first channels (i.e., lowest energy channel, E-channel 0 in IDL indexing).
    # ; Again, these data (just the counts/rate/flux from the lowest energy channel ONLY!!!)
    # ; should be hardwired to be NAN for all modes (burst and both types of survey).
    # ; The eyes not listed here or above are ok though... so once we do this, we can actually start
    # ; showing the data down to the lowest levels (~33 keV), meaning we'll have to adjust the hard-coded
    # ; ylim settings in SPEDAS and the SITL software:
    # ; from Drew Turner, 5Oct18:
    # ;Bad Channels (0 and 1):
    # ;Update: All channels 0 (Ch0) on MMS-2, -3, and -4 electron eyes (1, 2, 3, 4, 5, 9, 10, 11, 12) should be NaN
    # ;Additionally, the second channels (Ch1) on the following should also be made NaN:
    # ;MMS-1: Top: Ch0 on Eyes 6, 7
    # ;       Bot: Ch0 on Eyes 6, 7, 8
    # ;MMS-2: Top:
    # ;       Bot: Ch0 on Eyes 6, 8
    # ;MMS-3: Top: Ch0 on Eye 8
    # ;       Bot: Ch0 on Eyes 6, 7
    # ;MMS-4: Top: Ch1 on Eye 1; Ch0 on Eye 8
    # ;       Bot: Ch0 on Eyes 6, 7, 8; Ch1 on Eye 9

    bad_vars = []
    bad_vars_both_chans = []

    bad_ch0 = {}
    bad_ch0['mms1'] = {'top': [2, 5, 6, 7], 'bottom': [2, 3, 4, 5, 6, 7, 8, 9, 11, 12]}
    bad_ch0['mms2'] = {'top': [1, 2, 3, 4, 5, 9, 10, 11, 12], 'bottom': [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12]}
    bad_ch0['mms3'] = {'top': [1, 2, 3, 4, 5, 8, 9, 10, 11, 12], 'bottom': [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12]}
    bad_ch0['mms4'] = {'top': [1, 2, 3, 4, 5, 8, 9, 10, 11, 12], 'bottom': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}

    bad_ch1 = {}
    bad_ch1['mms1'] = {'top': [], 'bottom': [11]}
    bad_ch1['mms2'] = {'top': [8], 'bottom': [12]}
    bad_ch1['mms3'] = {'top': [1], 'bottom': []}
    bad_ch1['mms4'] = {'top': [1], 'bottom': [6, 9]}

    bad_ch0 = bad_ch0['mms' + str(probe)]
    bad_ch1 = bad_ch1['mms' + str(probe)]

    #### bottom channel
    # top electrons
    for bad_var in bad_ch0['top']:
        if bad_var in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_counts_sensorid_' + str(bad_var) + suffix))

    # bottom electrons
    for bad_var in bad_ch0['bottom']:
        if bad_var in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_counts_sensorid_' + str(bad_var) + suffix))

    # top ions
    for bad_var in bad_ch0['top']:
        if bad_var not in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_counts_sensorid_' + str(bad_var) + suffix))

    # bottom ions
    for bad_var in bad_ch0['bottom']:
        if bad_var not in [6, 7, 8]:
            continue  # ion eyes
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_counts_sensorid_' + str(bad_var) + suffix))

    #### bottom 2 channels
    # top electrons
    for bad_var in bad_ch1['top']:
        if bad_var in [6, 7, 8]:
            continue  # ion eyes
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_top_counts_sensorid_' + str(bad_var) + suffix))

    # bottom electrons
    for bad_var in bad_ch1['bottom']:
        if bad_var in [6, 7, 8]:
            continue  # ion eyes
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_electron_bottom_counts_sensorid_' + str(bad_var) + suffix))

    # top ions
    for bad_var in bad_ch1['top']:
        if bad_var not in [6, 7, 8]:
            continue  # ion eyes
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_top_counts_sensorid_' + str(bad_var) + suffix))

    # bottom ions
    for bad_var in bad_ch1['bottom']:
        if bad_var not in [6, 7, 8]:
            continue  # ion eyes
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_count_rate_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_intensity_sensorid_' + str(bad_var) + suffix))
        bad_vars_both_chans.append(tnames('mms' + str(probe) + '_epd_feeps_' + data_rate_level + '_ion_bottom_counts_sensorid_' + str(bad_var) + suffix))

    # set the first energy channel to NaN
    for bad_var in bad_vars:
        if bad_var == []:
            continue
        bad_var_data = pytplot.get_data(bad_var[0])
        if bad_var_data is not None:
            times, data = bad_var_data
            energies = pytplot.data_quants[bad_var[0]].spec_bins.values
            # check if the energy table contains all NaNs
            if np.isnan(np.sum(energies)):
                continue
            data[:, 0] = np.nan
            pytplot.store_data(bad_var[0], data={'x': times, 'y': data, 'v': energies.reshape(energies.size)})

    # set the first and second energy channels to NaN
    for bad_var in bad_vars_both_chans:
        if bad_var == []:
            continue
        bad_var_data = pytplot.get_data(bad_var[0])
        if bad_var_data is not None:
            times, data = bad_var_data
            energies = pytplot.data_quants[bad_var[0]].spec_bins.values
            # check if the energy table contains all NaNs
            if np.isnan(np.sum(energies)):
                continue
            data[:, 0] = np.nan
            data[:, 1] = np.nan
            pytplot.store_data(bad_var[0], data={'x': times, 'y': data, 'v': energies.reshape(energies.size)})
def mms_get_state_data(probe='1', trange=['2015-10-16', '2015-10-17'], datatypes=['pos', 'vel'],
                       level='def', no_download=False, pred_or_def=True, suffix='',
                       always_prompt=False):
    """
    Helper routine for loading state data (ASCII files from the SDC); not meant to be
    called directly; see pyspedas.mms.state instead
    """
    if not isinstance(probe, list):
        probe = [probe]

    local_data_dir = CONFIG['local_data_dir']
    download_only = CONFIG['download_only']

    start_time = time_double(trange[0]) - 60 * 60 * 24.
    end_time = time_double(trange[1])

    # check if end date is anything other than 00:00:00, if so
    # add a day to the end time to ensure that all data is downloaded
    if type(trange[1]) == str:
        endtime_day = time_double(time_string(time_double(trange[1]), fmt='%Y-%m-%d'))
    else:
        endtime_day = time_double(time_string(trange[1], fmt='%Y-%m-%d'))

    if end_time > endtime_day:
        add_day = 60 * 60 * 24.
    else:
        add_day = 0

    start_time_str = time_string(start_time, fmt='%Y-%m-%d')
    end_time_str = time_string(end_time + add_day, fmt='%Y-%m-%d')

    filetypes = []

    if 'pos' in datatypes or 'vel' in datatypes:
        filetypes.append('eph')

    if 'spinras' in datatypes or 'spindec' in datatypes:
        filetypes.append('att')

    user = None
    if not no_download:
        sdc_session, user = mms_login_lasp(always_prompt=always_prompt)

    for probe_id in probe:
        # probe will need to be a string from now on
        probe_id = str(probe_id)

        for filetype in filetypes:
            file_dir = local_data_dir + 'ancillary/' + 'mms' + probe_id + '/' + level + filetype + '/'
            product = level + filetype

            # predicted doesn't support start_date/end_date
            if level == 'def':
                dates_for_query = '&start_date=' + start_time_str + '&end_date=' + end_time_str
            else:
                dates_for_query = ''

            if user is None:
                url = 'https://lasp.colorado.edu/mms/sdc/public/files/api/v1/file_info/ancillary?sc_id=mms' + probe_id + '&product=' + product + dates_for_query
            else:
                url = 'https://lasp.colorado.edu/mms/sdc/sitl/files/api/v1/file_info/ancillary?sc_id=mms' + probe_id + '&product=' + product + dates_for_query

            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=ResourceWarning)
                http_request = sdc_session.get(url, verify=True)
                http_json = http_request.json()

            out_dir = os.sep.join([local_data_dir, 'ancillary', 'mms' + probe_id, level + filetype])

            files_in_interval = []
            out_files = []

            # since predicted doesn't support start_date/end_date, we'll need to parse the correct dates
            if level != 'def':
                for file in http_json['files']:
                    # first, remove the dates that start after the end of the trange
                    if time_double(file['start_date']) > endtime_day:
                        continue
                    # now remove files that end before the start of the trange
                    if start_time > time_double(file['end_date']):
                        continue
                    files_in_interval.append(file)
                    break
            else:
                files_in_interval = http_json['files']

            for file in files_in_interval:
                out_file = os.sep.join([out_dir, file['file_name']])

                if os.path.exists(out_file) and str(os.stat(out_file).st_size) == str(file['file_size']):
                    if not download_only:
                        logging.info('Loading ' + out_file)
                    out_files.append(out_file)
                    http_request.close()
                    continue

                if user is None:
                    download_url = 'https://lasp.colorado.edu/mms/sdc/public/files/api/v1/download/ancillary?file=' + file['file_name']
                else:
                    download_url = 'https://lasp.colorado.edu/mms/sdc/sitl/files/api/v1/download/ancillary?file=' + file['file_name']

                logging.info('Downloading ' + file['file_name'] + ' to ' + out_dir)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=ResourceWarning)
                    fsrc = sdc_session.get(download_url, stream=True, verify=True)

                ftmp = NamedTemporaryFile(delete=False)

                with open(ftmp.name, 'wb') as f:
                    copyfileobj(fsrc.raw, f)

                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)

                # if the download was successful, copy to data directory
                copy(ftmp.name, out_file)
                out_files.append(out_file)

                fsrc.close()
                ftmp.close()

            if download_only:
                continue

            if filetype == 'eph':
                mms_load_eph_tplot(sorted(out_files), level=level, probe=probe_id,
                                   datatypes=datatypes, suffix=suffix, trange=trange)
            elif filetype == 'att':
                mms_load_att_tplot(sorted(out_files), level=level, probe=probe_id,
                                   datatypes=datatypes, suffix=suffix, trange=trange)

    if not no_download:
        sdc_session.close()
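# Illustrative usage sketch (our example): as the docstring notes, this helper is normally
# reached through pyspedas.mms.state(); a typical call for definitive position/velocity:
import pyspedas
pyspedas.mms.state(trange=['2015-10-16', '2015-10-17'], probe='1', datatypes=['pos', 'vel'])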
def mms_load_data(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy', level='l2',
                  instrument='fgm', datatype='', prefix='', suffix='', get_support_data=False,
                  time_clip=False):
    """
    This function loads MMS data into pyTplot variables
    """
    if not isinstance(probe, list):
        probe = [probe]
    if not isinstance(data_rate, list):
        data_rate = [data_rate]
    if not isinstance(level, list):
        level = [level]
    if not isinstance(datatype, list):
        datatype = [datatype]

    probe = [str(p) for p in probe]

    # allows the user to pass in trange as list of datetime objects
    if type(trange[0]) == datetime and type(trange[1]) == datetime:
        trange = [time_string(trange[0].timestamp()),
                  time_string(trange[1].timestamp())]

    start_date = parse(trange[0]).strftime('%Y-%m-%d')  # need to request full day, then parse out later
    end_date = parse(time_string(time_double(trange[1]) - 0.1)).strftime('%Y-%m-%d-%H-%M-%S')  # -1 second to avoid getting data for the next day

    download_only = CONFIG['download_only']

    if not CONFIG['no_download']:
        sdc_session, user = mms_login_lasp()

    out_files = []

    for prb in probe:
        for drate in data_rate:
            for lvl in level:
                for dtype in datatype:
                    if user is None:
                        url = 'https://lasp.colorado.edu/mms/sdc/public/files/api/v1/file_info/science?start_date=' + start_date + '&end_date=' + end_date + '&sc_id=mms' + prb + '&instrument_id=' + instrument + '&data_rate_mode=' + drate + '&data_level=' + lvl
                    else:
                        url = 'https://lasp.colorado.edu/mms/sdc/sitl/files/api/v1/file_info/science?start_date=' + start_date + '&end_date=' + end_date + '&sc_id=mms' + prb + '&instrument_id=' + instrument + '&data_rate_mode=' + drate + '&data_level=' + lvl

                    if dtype != '':
                        url = url + '&descriptor=' + dtype

                    if CONFIG['debug_mode']:
                        logging.info('Fetching: ' + url)

                    if CONFIG['no_download'] == False:
                        # query list of available files
                        try:
                            http_json = sdc_session.get(url, verify=True).json()

                            if CONFIG['debug_mode']:
                                logging.info('Filtering the results down to your trange')

                            files_in_interval = mms_files_in_interval(http_json['files'], trange)

                            for file in files_in_interval:
                                file_date = parse(file['timetag'])

                                if dtype == '':
                                    out_dir = os.sep.join([CONFIG['local_data_dir'], 'mms' + prb, instrument, drate, lvl, file_date.strftime('%Y'), file_date.strftime('%m')])
                                else:
                                    out_dir = os.sep.join([CONFIG['local_data_dir'], 'mms' + prb, instrument, drate, lvl, dtype, file_date.strftime('%Y'), file_date.strftime('%m')])

                                if drate.lower() == 'brst':
                                    out_dir = os.sep.join([out_dir, file_date.strftime('%d')])

                                out_file = os.sep.join([out_dir, file['file_name']])

                                if CONFIG['debug_mode']:
                                    logging.info('File: ' + file['file_name'] + ' / ' + file['timetag'])

                                #if os.path.exists(out_file) and str(os.stat(out_file).st_size) == str(file['file_size']):
                                if os.path.exists(out_file):
                                    if not download_only:
                                        logging.info('Loading ' + out_file)
                                    out_files.append(out_file)
                                    continue

                                if user is None:
                                    download_url = 'https://lasp.colorado.edu/mms/sdc/public/files/api/v1/download/science?file=' + file['file_name']
                                else:
                                    download_url = 'https://lasp.colorado.edu/mms/sdc/sitl/files/api/v1/download/science?file=' + file['file_name']

                                logging.info('Downloading ' + file['file_name'] + ' to ' + out_dir)

                                fsrc = sdc_session.get(download_url, stream=True, verify=True)
                                ftmp = NamedTemporaryFile(delete=False)

                                copyfileobj(fsrc.raw, ftmp)

                                if not os.path.exists(out_dir):
                                    os.makedirs(out_dir)

                                # if the download was successful, copy to data directory
                                copy(ftmp.name, out_file)
                                out_files.append(out_file)

                                ftmp.close()
                                fsrc.close()
                        except requests.exceptions.ConnectionError:
                            # No/bad internet connection; try loading the files locally
                            logging.error('No internet connection!')

                    if out_files == []:
                        if not download_only:
                            logging.info('Searching for local files...')
                        out_files = mms_get_local_files(prb, instrument, drate, lvl, dtype, trange)

    if not CONFIG['no_download']:
        sdc_session.close()

    if not download_only:
        out_files = sorted(out_files)

        new_variables = cdf_to_tplot(out_files, merge=True, get_support_data=get_support_data,
                                     prefix=prefix, suffix=suffix)

        if new_variables == []:
            logging.warning('No data loaded.')
            return

        logging.info('Loaded variables:')
        for new_var in new_variables:
            print(new_var)

            if time_clip:
                tclip(new_var, trange[0], trange[1], suffix='')

        return new_variables