def test_time_float(self):
    """Test time_float function."""
    t0 = time_float()
    t1 = time_float_one()
    t2 = time_float(1450181243)
    print("Time_float functions test:", t0, t1, t2)
    self.assertTrue(time_double('2015-12-15 12:07:23.767000') == 1450181243.767)
    self.assertTrue(time_double(['2015-12-15 12:07:23.767000',
                                 '2015-12-15 12:07:43.767000'])
                    == [1450181243.767, 1450181263.767])
def test_time_double(self):
    """Test time_double function."""
    self.assertTrue(time_string(time_double(), fmt='%Y-%m-%d')
                    == datetime.now().strftime('%Y-%m-%d'))
    self.assertTrue(time_double('2015-12-15/12:00') == 1450180800.0000000)
    self.assertTrue(time_double('2015-12-15/12') == 1450180800.0000000)
    # self.assertTrue(time_double('2015-12-15/6') == 1450159200.0000000)  # this one doesn't work
    self.assertTrue(time_double('2015-12-15/6:00') == 1450159200.0000000)
    self.assertTrue(time_double('2015-12-15/06:00') == 1450159200.0000000)
    self.assertTrue(time_double('2015-12-15') == 1450137600.0000000)
    self.assertTrue(time_double('2015 12 15') == 1450137600.0000000)
    self.assertTrue(time_double('2015-12') == 1448928000.0000000)
    self.assertTrue(time_double('2015') == 1420070400.0000000)
    self.assertTrue(time_double('2015-12-15 12:07:23.767000') == 1450181243.767)
    self.assertTrue(time_double(['2015-12-15 12:07:23.767000',
                                 '2015-12-15 12:07:43.767000'])
                    == [1450181243.767, 1450181263.767])
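# A minimal round-trip sketch (illustrative, not part of the test suite):
# time_double converts a time string to Unix seconds, and time_string
# converts seconds back to a string. Imports assume the same
# pyspedas.utilities modules used elsewhere in this repo.
from pyspedas.utilities.time_double import time_double
from pyspedas.utilities.time_string import time_string

t = time_double('2015-12-15/12:00')  # -> 1450180800.0
s = time_string(t)                   # -> '2015-12-15 12:00:00.000000'
assert time_double(s) == t           # the round trip is lossless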
import os
import time
import pickle
import logging
import numpy as np
import pandas as pd
from pyspedas.utilities.time_double import time_double


def read_data_files(out_files=None, dtype=None, out_type='np', save_pickle=False):
    """
    Read data on a daily basis, at 10-second or other resolution.

    :param out_files: list of paths of the downloaded data files.
    :param dtype: the type of data to read ('EICS' or 'SECS').
    :param out_type: the return type: 'np': numpy array; 'df': pandas dataframe; 'dc': dictionary.
    :param save_pickle: if True, save the output to a pickle file ('dc' output only).
    :return: a numpy ndarray (or dataframe/dictionary) spanning one or multiple days.
    """
    file_names_arr_Dir = out_files
    start_time = time.time()

    # Read the data at each time stamp (per resolution secs) on one specific date
    # and collect it into one pandas data frame (four columns).
    if out_type == 'df':
        if dtype == 'EICS':
            colnames = ['latitude', 'longitude', 'Jx', 'Jy']
        if dtype == 'SECS':
            colnames = ['latitude', 'longitude', 'J']
        data_all = []
        for idx, file in enumerate(file_names_arr_Dir):
            df = pd.read_csv(file, header=None, sep=r'\s+', skiprows=0, names=colnames)
            df['datetime'] = file[-19:-4]
            data_all.append(df)
        output = pd.concat(data_all, axis=0, ignore_index=True)

    elif out_type == 'np':
        latitude = []
        longitude = []
        date_time = []
        if dtype == 'EICS':
            Jx = []
            Jy = []
            for file in file_names_arr_Dir:
                di = np.loadtxt(file)
                num_row = np.shape(di)[0]
                latitude.extend(di[:, 0])
                longitude.extend(di[:, 1])
                Jx.extend(di[:, 2])
                Jy.extend(di[:, 3])
                date_time.extend(np.full((num_row, 1), file[-19:-4]))
            num_row2 = len(latitude)
            data_all = np.array([latitude, longitude, Jx, Jy, date_time])
            data_all = data_all.reshape([5, num_row2])
            data_all = np.transpose(data_all)
        if dtype == 'SECS':
            J = []
            for file in file_names_arr_Dir:
                di = np.loadtxt(file)
                num_row = np.shape(di)[0]
                latitude.extend(di[:, 0])
                longitude.extend(di[:, 1])
                J.extend(di[:, 2])
                date_time.extend(np.full((num_row, 1), file[-19:-4]))
            num_row2 = len(latitude)
            data_all = np.array([latitude, longitude, J, date_time])
            data_all = data_all.reshape([4, num_row2])
            data_all = np.transpose(data_all)
        output = data_all

    elif out_type == 'dc':
        data_dict = {}
        Jx = []
        Jy = []
        J = []
        date_time = []
        flag = 0
        filename_day1 = file_names_arr_Dir[0]
        for idx, file in enumerate(file_names_arr_Dir):  # per dat file with 1-min resolution
            if not os.path.isfile(file):
                continue  # jump out of the current iteration, into the next iteration of the same loop
            if os.stat(file).st_size == 0:  # check if the file is empty
                continue
            di = np.loadtxt(file)
            if np.shape(di)[0] > 0 and flag == 0:
                num_row = np.shape(di)[0]  # np array
                latitude = di[:, 0]        # np array
                longitude = di[:, 1]       # np array
                flag = 1
            if dtype == 'EICS':
                Jx.append(di[:, 2])  # list [np.arrays]
                Jy.append(di[:, 3])  # list [np.arrays]
            if dtype == 'SECS':
                J.append(di[:, 2])   # list [np.arrays]
            date_time.append(file[-19:-4])  # list of str

        date_time = np.array(date_time)     # np array of str
        date_time = time_double(date_time)  # np array of float
        if dtype == 'EICS':
            Jx = np.vstack(Jx)  # np array
            Jy = np.vstack(Jy)  # np array
            data_dict = {'time': date_time, 'latitude': latitude,
                         'longitude': longitude, 'Jx': Jx, 'Jy': Jy}
        if dtype == 'SECS':
            J = np.vstack(J)  # np array
            data_dict = {'time': date_time, 'latitude': latitude,
                         'longitude': longitude, 'J': J}
        output = data_dict

    else:
        raise TypeError("%r is an invalid value for out_type" % out_type)

    if save_pickle:
        if out_type == 'dc':  # too large, not useful for the other types
            with open('data_dc.pkl', 'wb') as f:
                pickle.dump(output, f)

    logging.info('running time of output ' + out_type +
                 ": --- %s seconds ---" % (time.time() - start_time))
    return output
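# A minimal usage sketch (the file names below are hypothetical). The
# timestamp is sliced from characters [-19:-4] of each path, so paths are
# expected to end in 'YYYYMMDD_HHMMSS.dat'.
eics_files = ['EICS20151215_120000.dat', 'EICS20151215_120010.dat']
df = read_data_files(out_files=eics_files, dtype='EICS', out_type='df')
print(df.columns.tolist())  # ['latitude', 'longitude', 'Jx', 'Jy', 'datetime']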
def cl_format_time(s):
    """Return a time string formatted for Cluster web services."""
    # Date format: YYYY-MM-DDThh:mm:ssZ
    r = time_string(time_double(s), "%Y-%m-%dT%H:%M:%SZ")
    return r
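# Example (illustrative): any time string accepted by time_double is
# normalized to the Cluster web-services format.
print(cl_format_time('2015-12-15 12:07:23'))  # -> 2015-12-15T12:07:23Z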
# e.g.,
# >>> times[0]
# 1444953613.330852
# >>> data[0]
# array([ 8580.49 ,  7339.21 ,  6250.034, 12905.493], dtype=float32)

# convert the unix time to a string
from pyspedas.utilities.time_string import time_string
print(time_string(1444953613.330852))

# convert a time string back to unix time
from pyspedas.utilities.time_double import time_double
print(time_double('2015-10-16 00:00:13.330852'))

# create new tplot variables
store_data('b_gsm_vec', data={'x': times, 'y': data[:, 0:3]})  # B-field vector
store_data('b_gsm_mag', data={'x': times, 'y': data[:, 3]})    # B-field magnitude
tplot(['b_gsm_mag', 'b_gsm_vec'])

# modify variable metadata
from pytplot import options
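# A short illustrative continuation (the option values below are assumptions,
# not from the original): pytplot's options() sets per-variable plot metadata
# such as legend labels and axis titles.
options('b_gsm_vec', 'legend_names', ['Bx GSM', 'By GSM', 'Bz GSM'])
options('b_gsm_mag', 'ytitle', '|B| (nT)')
tplot(['b_gsm_mag', 'b_gsm_vec'])  # re-plot with the updated metadata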