def __init__(self, inlst):
    # zip(*inlst)[0] is not subscriptable under Python 3; unpack instead
    dt, vals = zip(*inlst)
    data = np.hstack(vals).reshape((-1, 1))
    dat = dm.SpaceData()

    dat['time'] = dm.dmarray(data[:, 0])
    dat['time'].attrs['CATDESC'] = 'Start or stop time'
    dat['time'].attrs['FIELDNAM'] = 'time'
    dat['time'].attrs['LABLAXIS'] = 'Start or stop time'
    dat['time'].attrs['SCALETYP'] = 'linear'
    #dat['time'].attrs['UNITS'] = 'none'
    dat['time'].attrs['UNITS'] = 'ms'
    dat['time'].attrs['VALIDMIN'] = datetime.datetime(1990, 1, 1)
    dat['time'].attrs['VALIDMAX'] = datetime.datetime(2029, 12, 31, 23, 59, 59, 999000)
    dat['time'].attrs['VAR_TYPE'] = 'support_data'
    dat['time'].attrs['VAR_NOTES'] = 'Time data started or stopped'
    dat['time'].attrs['DEPEND_0'] = 'Epoch'
    dat['time'].attrs['FILLVAL'] = 'None'

    dat['Epoch'] = dm.dmarray(dt)
    dat['Epoch'].attrs['CATDESC'] = 'Default Time'
    dat['Epoch'].attrs['FIELDNAM'] = 'Epoch'
    #dat['Epoch'].attrs['FILLVAL'] = datetime.datetime(2100,12,31,23,59,59,999000)
    dat['Epoch'].attrs['LABLAXIS'] = 'Epoch'
    dat['Epoch'].attrs['SCALETYP'] = 'linear'
    dat['Epoch'].attrs['UNITS'] = 'ms'
    dat['Epoch'].attrs['VALIDMIN'] = datetime.datetime(1990, 1, 1)
    dat['Epoch'].attrs['VALIDMAX'] = datetime.datetime(2029, 12, 31, 23, 59, 59, 999000)
    dat['Epoch'].attrs['VAR_TYPE'] = 'support_data'
    dat['Epoch'].attrs['TIME_BASE'] = '0 AD'
    dat['Epoch'].attrs['MONOTON'] = 'INCREASE'
    dat['Epoch'].attrs['VAR_NOTES'] = 'Epoch at each configuration point'

    self.data = dat
def FIRE_HiRes_L1_L2(datafile, ephemfile):
    full_data = dm.readJSONheadedASCII(datafile)
    ephem = dm.readJSONheadedASCII(ephemfile)
    data = Trim_data_file(full_data, ephem)
    labels = ephem.keys()
    ephem_fields = ['Lsimple', 'CDMAG_MLT']
    # interpolate ephemeris quantities onto the data timebase (TAI seconds)
    dt = spt.Ticktock(data['Epoch']).TAI
    et = spt.Ticktock(ephem['DateTime']).TAI
    for field in ephem_fields:
        print(field)
        nx = tb.interpol(dt, et, ephem[field])
        data[field] = dm.dmarray(nx)
    ephem_lat = ephem['Rgeod_LatLon'][:, 0]
    ephem_lon = ephem['Rgeod_LatLon'][:, 1]
    data['Lat'] = dm.dmarray(tb.interpol(dt, et, ephem_lat))
    data['Lon'] = dm.dmarray(tb.interpol(dt, et, ephem_lon))
    n_lines = len(data['Epoch'])
    eflux = np.zeros((n_lines, 12))  # shape must be a tuple; np.zeros(n, 12) raises TypeError
    day = ephem['DateTime'][0][0:10]
    outfile = datafile[:-23] + day + '-HiRes_L2.txt'
    dm.toJSONheadedASCII(outfile, data)
    return data
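# A minimal sketch of the interpolation step above, assuming spacepy is
# available: an ephemeris quantity sampled on a coarse timebase is
# interpolated onto the finer data timebase via TAI seconds.
import datetime
import numpy as np
import spacepy.time as spt
import spacepy.toolbox as tb

et = spt.Ticktock([datetime.datetime(2015, 2, 1, h) for h in range(0, 10, 2)]).TAI
dt = spt.Ticktock([datetime.datetime(2015, 2, 1, h) for h in range(9)]).TAI
y = np.arange(5, dtype=float)   # a coarse ephemeris quantity
print(tb.interpol(dt, et, y))   # the same quantity on the fine timebase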
def test_resample1(self):
    '''resample should give consistent results'''
    ans = dm.SpaceData()
    ans.attrs['foo'] = 'bar'
    ans['a'] = [0.5, 2.5, 4.5, 6.5, 8.5]
    ans['b'] = dm.dmarray([4.5, 6.5, 8.5, 10.5, 12.5])
    ans['b'].attrs['marco'] = 'polo'
    ans['Epoch'] = [datetime.datetime(2010, 1, 1, 1, 0),
                    datetime.datetime(2010, 1, 1, 3, 0),
                    datetime.datetime(2010, 1, 1, 5, 0),
                    datetime.datetime(2010, 1, 1, 7, 0),
                    datetime.datetime(2010, 1, 1, 9, 0)]
    a = dm.SpaceData()
    a['a'] = dm.dmarray(range(10))  # sequence 0 through 9
    a['b'] = dm.dmarray(range(10)) + 4  # sequence 4 through 13
    a['b'].attrs['marco'] = 'polo'
    a['c'] = dm.dmarray(range(3)) + 10
    a.attrs['foo'] = 'bar'
    times = [datetime.datetime(2010, 1, 1) + datetime.timedelta(hours=i)
             for i in range(10)]
    out = dm.resample(a, times, winsize=datetime.timedelta(hours=2),
                      overlap=datetime.timedelta(hours=0))
    # starting from element 0, step two hours (points) with no overlap and average;
    # therefore each resulting value should be the mean of a pair of points,
    # that is, ans['a'][0] should be the mean of 0 and 1
    for k, v in out.items():
        np.testing.assert_equal(v, ans[k])
    self.assertEqual(ans.attrs, out.attrs)
    self.assertEqual(ans['b'].attrs['marco'], 'polo')
    # assertTrue(x, msg) passes vacuously; compare the attribute value instead
    self.assertEqual(out['b'].attrs['DEPEND_0'], 'Epoch')
    self.assertFalse('c' in out)
def test_resample3(self):
    '''resample should give consistent results (2d)'''
    ans = {}
    ans['a'] = [[1., 2.], [5., 6.], [9., 10.], [13., 14.], [17., 18.]]
    ans['b'] = [4.5, 6.5, 8.5, 10.5, 12.5]
    ans['Epoch'] = [datetime.datetime(2010, 1, 1, 1, 0),
                    datetime.datetime(2010, 1, 1, 3, 0),
                    datetime.datetime(2010, 1, 1, 5, 0),
                    datetime.datetime(2010, 1, 1, 7, 0),
                    datetime.datetime(2010, 1, 1, 9, 0)]
    a = dm.SpaceData()
    a['a'] = dm.dmarray(range(10 * 2)).reshape(10, 2)
    a['b'] = dm.dmarray(range(10)) + 4
    a['c'] = dm.dmarray(range(3)) + 10
    times = [datetime.datetime(2010, 1, 1) + datetime.timedelta(hours=i)
             for i in range(10)]
    out = dm.resample(a, times, winsize=datetime.timedelta(hours=2),
                      overlap=datetime.timedelta(hours=0))
    for k, v in out.items():
        np.testing.assert_equal(v, ans[k])
def test_toJSONheadedASCII(self):
    """Write known datamodel to JSON-headed ASCII and ensure it has right stuff added"""
    a = dm.SpaceData()
    a.attrs['Global'] = 'A global attribute'
    a['Var1'] = dm.dmarray([1, 2, 3, 4, 5], attrs={'Local1': 'A local attribute'})
    a['Var2'] = dm.dmarray([[8, 9], [9, 1], [3, 4], [8, 9], [7, 8]])
    a['MVar'] = dm.dmarray([7.8], attrs={'Note': 'Metadata'})
    t_file = tempfile.NamedTemporaryFile(delete=False)
    t_file.close()
    dm.toJSONheadedASCII(t_file.name, a, depend0='Var1', order=['Var1', 'Var2'])
    dat2 = dm.readJSONheadedASCII(t_file.name)
    # test global attr
    self.assertTrue(a.attrs == dat2.attrs)
    # test that metadata is back and all original keys are present
    for key in a['MVar'].attrs:
        self.assertTrue(key in dat2['MVar'].attrs)
    np.testing.assert_array_equal(a['MVar'], dat2['MVar'])
    # test vars are right
    np.testing.assert_almost_equal(a['Var1'], dat2['Var1'])
    np.testing.assert_almost_equal(a['Var2'], dat2['Var2'])
    # test for added dimension and start col
    self.assertTrue(dat2['Var1'].attrs['DIMENSION'] == [1])
    self.assertTrue(dat2['Var2'].attrs['DIMENSION'] == [2])
    os.remove(t_file.name)
def test_resample1(self):
    '''resample should give consistent results'''
    ans = dm.SpaceData()
    ans.attrs['foo'] = 'bar'
    ans['a'] = [1., 3., 5., 7.]
    ans['b'] = dm.dmarray([5., 7., 9., 11.])
    ans['b'].attrs['marco'] = 'polo'
    ans['Epoch'] = [datetime.datetime(2010, 1, 1, 1, 0),
                    datetime.datetime(2010, 1, 1, 3, 0),
                    datetime.datetime(2010, 1, 1, 5, 0),
                    datetime.datetime(2010, 1, 1, 7, 0)]
    a = dm.SpaceData()
    a['a'] = dm.dmarray(range(10))
    a['b'] = dm.dmarray(range(10)) + 4
    a['b'].attrs['marco'] = 'polo'
    a['c'] = dm.dmarray(range(3)) + 10
    a.attrs['foo'] = 'bar'
    times = [datetime.datetime(2010, 1, 1) + datetime.timedelta(hours=i)
             for i in range(10)]
    out = dm.resample(a, times, winsize=datetime.timedelta(hours=2),
                      overlap=datetime.timedelta(hours=0))
    for k, v in out.items():
        np.testing.assert_equal(v, ans[k])
    self.assertEqual(ans.attrs, out.attrs)
    self.assertEqual(ans['b'].attrs['marco'], 'polo')
    # assertTrue(x, msg) passes vacuously; compare the attribute value instead
    self.assertEqual(out['b'].attrs['DEPEND_0'], 'Epoch')
    self.assertFalse('c' in out)
def test_resample2(self):
    '''resample should give consistent results (ticktock)'''
    ans = {}
    ans['a'] = [0.5, 2.5, 4.5, 6.5, 8.5]
    ans['b'] = [4.5, 6.5, 8.5, 10.5, 12.5]
    ans['Epoch'] = [datetime.datetime(2010, 1, 1, 1, 0),
                    datetime.datetime(2010, 1, 1, 3, 0),
                    datetime.datetime(2010, 1, 1, 5, 0),
                    datetime.datetime(2010, 1, 1, 7, 0),
                    datetime.datetime(2010, 1, 1, 9, 0)]
    # For justification of test results see test above (test_resample1)
    a = dm.SpaceData()
    a['a'] = dm.dmarray(range(10))
    a['b'] = dm.dmarray(range(10)) + 4
    a['c'] = dm.dmarray(range(3)) + 10
    times = spt.Ticktock([datetime.datetime(2010, 1, 1) + datetime.timedelta(hours=i)
                          for i in range(10)])
    out = dm.resample(a, times, winsize=datetime.timedelta(hours=2),
                      overlap=datetime.timedelta(hours=0))
    for k, v in out.items():
        np.testing.assert_equal(v, ans[k])
def test_defaults(self):
    """run it and check that defaults were set correctly"""
    a = spectrogram(self.data, variables=self.kwargs['variables'])
    ans = {'bins': [dm.dmarray([730120.0, 730135.30769231, 730150.61538462,
                                730165.92307692, 730181.23076923, 730196.53846154,
                                730211.84615385, 730227.15384615, 730242.46153846,
                                730257.76923077, 730273.07692308, 730288.38461538,
                                730303.69230769, 730319.]),
                    dm.dmarray([0.00169679, 0.07848775, 0.1552787, 0.23206965,
                                0.30886061, 0.38565156, 0.46244251, 0.53923347,
                                0.61602442, 0.69281538, 0.76960633, 0.84639728,
                                0.92318824, 0.99997919])],
           'variables': ['xval', 'yval', 'zval'],
           'ylim': (0.0012085702179961411, 0.99323954710300699),
           'zlim': (0.001696792515639145, 0.99997919064162388)}
    for key in ans:
        if key == 'variables':
            self.assertEqual(a.specSettings[key], ans[key])
        elif key == 'bins':
            # np.testing.assert_allclose(a.specSettings[key], ans[key], atol=1e-2, rtol=1e-3)
            np.testing.assert_almost_equal(a.specSettings[key], ans[key], decimal=2)
        else:
            # np.testing.assert_allclose(a.specSettings[key], ans[key], rtol=1e-5)
            np.testing.assert_almost_equal(a.specSettings[key], ans[key], decimal=6)
    self.assertRaises(NotImplementedError, a.add_data, self.data)
def _parse(self, lines):
    '''
    Given raw ascii input as a list of lines, parse into object.
    '''
    if lines[0][0:7] == ' Format':
        # IAGA-2002 formatted file.
        data = parse_iaga(lines, iagacode='DST')
        for key in data:
            self[key] = data[key]
        self['dst'].attrs['units'] = 'nT'
        self.attrs['npts'] = data['time'].size
        return

    self.attrs['npts'] = len(lines)
    time = []
    dst = np.zeros(24 * self.attrs['npts'])
    for i, line in enumerate(lines):
        # Get year, month, day; century defaults to 1900 when unparseable.
        try:
            yy = int(line[14:16]) * 100
        except ValueError:
            yy = 1900
        yy = yy + int(line[3:5])
        dd = int(line[8:10])
        mm = int(line[5:7])
        # Parse the rest of the data: 24 hourly values per record.
        for j in range(0, 24):
            time.append(dt.datetime(yy, mm, dd, j))
            loc = 20 + 4 * j
            dst[24 * i + j] = float(line[loc:loc + 4])
    self['time'] = dmarray(time)
    self['dst'] = dmarray(dst, attrs={'units': 'nT'})
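# A quick sketch of the fixed-width slicing above, run on a synthetic record
# (column positions taken from the code itself, not from Kyoto format docs):
line = 'DST9012*01    19    ' + ''.join('%4d' % v for v in range(-10, 14))
yy = int(line[14:16]) * 100 + int(line[3:5])   # century + 2-digit year -> 1990
mm, dd = int(line[5:7]), int(line[8:10])       # 12, 1
vals = [float(line[20 + 4 * j:24 + 4 * j]) for j in range(24)]
print(yy, mm, dd, vals[0], vals[-1])           # 1990 12 1 -10.0 13.0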
def test_defaults_extended(self):
    """run it and check that defaults were set correctly (extended_out)"""
    a = spectrogram(self.data, variables=self.kwargs['variables'], extended_out=True)
    ans = {'bins': [dm.dmarray([0.00120857, 0.07751865, 0.15382872, 0.2301388,
                                0.30644887, 0.38275895, 0.45906902, 0.5353791,
                                0.61168917, 0.68799925, 0.76430932, 0.8406194,
                                0.91692947, 0.99323955]),
                    dm.dmarray([0.00169679, 0.07848775, 0.1552787, 0.23206965,
                                0.30886061, 0.38565156, 0.46244251, 0.53923347,
                                0.61602442, 0.69281538, 0.76960633, 0.84639728,
                                0.92318824, 0.99997919])],
           'variables': ['xval', 'yval', 'zval'],
           'xlim': (0.0012085702179961411, 0.99323954710300699),
           'ylim': (0.001696792515639145, 0.99997919064162388),
           'zlim': (0.012544022260691956, 0.99059103521121727)}
    for key in ans:
        if key == 'variables':
            self.assertEqual(a.specSettings[key], ans[key])
        else:
            # np.testing.assert_allclose(a.specSettings[key], ans[key], rtol=1e-5)
            np.testing.assert_almost_equal(a.specSettings[key], ans[key], decimal=8)
def __init__(self, filename, *args, **kwargs):
    '''
    Reads the data; sorts into arrays.
    '''
    import datetime as dt
    from spacepy.datamodel import dmarray
    from matplotlib.dates import date2num

    super(MltSlice, self).__init__(*args, **kwargs)  # Init as PbData.
    self.attrs['file'] = filename

    # The original never closed the file; a with-block handles that.
    with open(filename, 'r') as f:
        # Parse header.
        self.attrs['mlt'] = float(f.readline().split()[-1])
        self['L'] = dmarray(np.array(f.readline().split()[1:], dtype=float),
                            {'units': '$R_E$'})
        # Parse remainder of file.
        lines = f.readlines()

    self['n'] = dmarray(np.zeros([len(lines), len(self['L'])]),
                        {'units': 'cm^{-3}'})
    self['time'] = dmarray(np.zeros(len(lines), dtype=object))
    for i, l in enumerate(lines):
        p = l.split()
        self['time'][i] = dt.datetime(int(p[0]), int(p[1]), int(p[2]),
                                      int(p[3]), int(p[4]), int(p[5]),
                                      int(p[6]) * 1000)
        self['n'][i, :] = p[7:]

    # Some "hidden" variables for plotting.
    self._dtime = date2num(self['time'])
    self._dy = self['L'][1] - self['L'][0]
def setUp(self):
    super(spectrogramDateTests, self).setUp()
    self.kwargs = {'variables': ['xval', 'yval', 'zval']}
    np.random.seed(8675309)
    self.data = dm.SpaceData(
        xval=dm.dmarray([datetime.datetime(2000, 1, 1) + datetime.timedelta(days=nn)
                         for nn in range(200)]),
        yval=dm.dmarray(np.random.random_sample(200)),
        zval=dm.dmarray(np.random.random_sample(200)))
def setUp(self):
    super(spectrogramTests, self).setUp()
    self.kwargs = {}
    self.kwargs['variables'] = ['xval', 'yval', 'zval']
    np.random.seed(8675309)
    self.data = dm.SpaceData(xval=dm.dmarray(np.random.random_sample(200)),
                             yval=dm.dmarray(np.random.random_sample(200)),
                             zval=dm.dmarray(np.random.random_sample(200)))
def test_resample_shape(self):
    '''resample should give consistent results, 1d or 2d'''
    a = dm.SpaceData()
    a['a'] = dm.dmarray(range(10 * 3 * 4)).reshape(10, 3, 4)
    a['b'] = dm.dmarray(range(10)) + 4
    a['c'] = dm.dmarray(range(3)) + 10
    times = [datetime.datetime(2010, 1, 1) + datetime.timedelta(hours=i)
             for i in range(10)]
    self.assertRaises(IndexError, dm.resample, a, times,
                      datetime.timedelta(hours=2), datetime.timedelta(hours=0))
def __init__(self, inlst):
    # zip(*inlst)[0] is not subscriptable under Python 3; unpack instead
    dt, vals = zip(*inlst)
    data = np.hstack(vals).reshape((-1, 1))
    dat = dm.SpaceData()

    dat['Time'] = dm.dmarray(data[:, 0])
    dat['Time'].attrs['CATDESC'] = 'Start or stop Time'
    dat['Time'].attrs['FIELDNAM'] = 'Time'
    dat['Time'].attrs['LABLAXIS'] = 'Start or stop Time'
    dat['Time'].attrs['SCALETYP'] = 'linear'
    #dat['Time'].attrs['UNITS'] = 'none'
    dat['Time'].attrs['VALIDMIN'] = datetime.datetime(1990, 1, 1)
    dat['Time'].attrs['VALIDMAX'] = datetime.datetime(2029, 12, 31, 23, 59, 59, 999000)
    dat['Time'].attrs['VAR_TYPE'] = 'support_data'
    dat['Time'].attrs['VAR_NOTES'] = 'Time data started or stopped'
    dat['Time'].attrs['DEPEND_0'] = 'Epoch'
    dat['Time'].attrs['FILLVAL'] = 'None'

    dat['Epoch'] = dm.dmarray(dt)
    dat['Epoch'].attrs['CATDESC'] = 'Default Time'
    dat['Epoch'].attrs['FIELDNAM'] = 'Epoch'
    #dat['Epoch'].attrs['FILLVAL'] = datetime.datetime(2100,12,31,23,59,59,999000)
    dat['Epoch'].attrs['LABLAXIS'] = 'Epoch'
    dat['Epoch'].attrs['SCALETYP'] = 'linear'
    dat['Epoch'].attrs['VALIDMIN'] = datetime.datetime(1990, 1, 1)
    dat['Epoch'].attrs['VALIDMAX'] = datetime.datetime(2029, 12, 31, 23, 59, 59, 999000)
    dat['Epoch'].attrs['VAR_TYPE'] = 'support_data'
    dat['Epoch'].attrs['TIME_BASE'] = '0 AD'
    dat['Epoch'].attrs['MONOTON'] = 'INCREASE'
    dat['Epoch'].attrs['VAR_NOTES'] = 'Epoch at each configuration point'

    dat['Mode'] = dm.dmarray(np.zeros(len(dt), dtype=int))
    dat['Mode'][...] = -1
    dat['Mode'].attrs['FIELDNAM'] = 'Mode'
    dat['Mode'].attrs['FILLVAL'] = -1
    dat['Mode'].attrs['LABLAXIS'] = 'FIRE Mode'
    dat['Mode'].attrs['SCALETYP'] = 'linear'
    dat['Mode'].attrs['VALIDMIN'] = 0
    dat['Mode'].attrs['VALIDMAX'] = 1
    dat['Mode'].attrs['VAR_TYPE'] = 'support_data'
    dat['Mode'].attrs['VAR_NOTES'] = 'Is the line FIRE on (=1) or FIRE off (=0)'
    dat['Mode'][::2] = 1   # even rows: FIRE on
    dat['Mode'][1::2] = 0  # odd rows: FIRE off

    dat['Duration'] = dm.dmarray(np.zeros(len(dt), dtype=int))
    dat['Duration'][...] = -1
    dat['Duration'].attrs['FIELDNAM'] = 'Duration'
    dat['Duration'].attrs['FILLVAL'] = -1
    dat['Duration'].attrs['LABLAXIS'] = 'FIRE Duration'
    dat['Duration'].attrs['SCALETYP'] = 'linear'
    dat['Duration'].attrs['VALIDMIN'] = 0
    dat['Duration'].attrs['VALIDMAX'] = 100000
    dat['Duration'].attrs['VAR_TYPE'] = 'support_data'
    dat['Duration'].attrs['VAR_NOTES'] = 'Duration of the on or off'
    df = np.asarray([v1 - v2 for v1, v2 in zip(dat['Time'], dat['Epoch'])])
    dat['Duration'][...] = np.asarray([v.days * 24 * 60 * 60 + v.seconds for v in df])

    self.data = dat
def __init__(self, inlst):
    if not inlst:
        print("** No packets decoded cannot continue **")
        sys.exit(1)
    # zip(*inlst)[0] is not subscriptable under Python 3; unpack instead
    dt, vals = zip(*inlst)
    data = np.hstack(vals).reshape((-1, 2))
    # go through Context and change the data type and set the None to fill
    tmp = np.zeros(data.shape, dtype=int)
    for (i, j), val in np.ndenumerate(data):
        try:
            tmp[i, j] = val
        except (TypeError, ValueError):
            tmp[i, j] = -2**16 - 1
    dat = dm.SpaceData()

    dat['Context'] = dm.dmarray(tmp[:])
    dat['Context'].attrs['CATDESC'] = 'Context data'
    dat['Context'].attrs['FIELDNAM'] = 'Context'
    dat['Context'].attrs['ELEMENT_LABELS'] = "Det_0", "Det_1",
    dat['Context'].attrs['ELEMENT_NAMES'] = "Det_0", "Det_1",
    dat['Context'].attrs['LABEL'] = 'Context data'
    dat['Context'].attrs['SCALE_TYPE'] = 'log'
    #dat['time'].attrs['UNITS'] = 'none'
    dat['Context'].attrs['UNITS'] = ''
    dat['Context'].attrs['VALID_MIN'] = 0
    dat['Context'].attrs['VALID_MAX'] = 2**15 - 1
    dat['Context'].attrs['VAR_TYPE'] = 'data'
    dat['Context'].attrs['VAR_NOTES'] = 'Context data 6s average'
    dat['Context'].attrs['DEPEND_0'] = 'Epoch'
    dat['Context'].attrs['FILL_VALUE'] = -2**16 - 1

    dat['Epoch'] = dm.dmarray(dt)
    dat['Epoch'].attrs['CATDESC'] = 'Default Time'
    dat['Epoch'].attrs['FIELDNAM'] = 'Epoch'
    #dat['Epoch'].attrs['FILLVAL'] = datetime.datetime(2100,12,31,23,59,59,999000)
    dat['Epoch'].attrs['LABEL'] = 'Epoch'
    dat['Epoch'].attrs['SCALE_TYPE'] = 'linear'
    # dat['Epoch'].attrs['VALID_MIN'] = datetime.datetime(1990,1,1)
    # dat['Epoch'].attrs['VALID_MAX'] = datetime.datetime(2029,12,31,23,59,59,999000)
    dat['Epoch'].attrs['VAR_TYPE'] = 'support_data'
    dat['Epoch'].attrs['TIME_BASE'] = '0 AD'
    dat['Epoch'].attrs['MONOTON'] = 'INCREASE'
    dat['Epoch'].attrs['VAR_NOTES'] = 'Epoch at each configuration point'

    # go through and remove duplicate times and data
    print("Looking for duplicate measurements")
    arr, dt_ind, return_inverse = np.unique(dat['Epoch'], return_index=True,
                                            return_inverse=True)  # unique and sorted
    print("Found {0} duplicates of {1}".format(len(return_inverse) - len(dt_ind),
                                               len(return_inverse)))
    dat['Epoch'] = arr
    dat['Context'] = dat['Context'][dt_ind]
    self.data = dat
def test_toRecArray(self):
    '''a record array can be created from a SpaceData'''
    sd = dm.SpaceData()
    sd['x'] = dm.dmarray([1.0, 2.0])
    sd['y'] = dm.dmarray([2, 4])
    ra = dm.toRecArray(sd)
    np.testing.assert_equal(ra['x'], [1.0, 2.0])
    np.testing.assert_equal(ra['y'], [2, 4])
    self.assertEqual(ra.dtype, np.dtype((np.record, [('x', '<f8'), ('y', '<i8')])))
def test_toRecArray_dtypes1(self):
    '''recarray created from dmarray preserves data types (32-bit)'''
    sd = dm.SpaceData()
    sd['x'] = dm.dmarray([1.0, 2.0], dtype=np.float32)
    sd['y'] = dm.dmarray([2, 4], dtype=np.int32)
    ra = dm.toRecArray(sd)
    expected = [sd[key].dtype for key in sd]
    got = [ra.dtype[name] for name in ra.dtype.names]
    self.assertEqual(expected, got)
def test_toRecArray_contents(self):
    '''a record array can be created from a SpaceData, keys and values equal'''
    sd = dm.SpaceData()
    sd['x'] = dm.dmarray([1.0, 2.0])
    sd['y'] = dm.dmarray([2, 4])
    ra = dm.toRecArray(sd)
    np.testing.assert_equal(ra['x'], [1.0, 2.0])
    np.testing.assert_equal(ra['y'], [2, 4])
    self.assertEqual(['x', 'y'], sorted(ra.dtype.fields))
def GSMtoMLT(gsm, dt):
    """
    convert GSM values to MLT in the lgm way

    Parameters
    ----------
    gsm : array_like
        Nx3 array_like of the GSM position
    dt : array_like
        N element array_like of datetime objects

    Returns
    -------
    out : numpy.array
        N element array of the MLT values
    """
    def doConv(gsm, dt):
        Pgsm = Lgm_Vector.Lgm_Vector(*gsm)
        Pwgs = Lgm_Vector.Lgm_Vector()
        Pmlt = Lgm_Vector.Lgm_Vector()
        cT = pointer(Lgm_CTrans())
        Lgm_Set_Coord_Transforms(dateToDateLong(dt), dateToFPHours(dt), cT)
        Lgm_Convert_Coords(pointer(Pgsm), pointer(Pwgs), GSM_TO_WGS84, cT)
        Lgm_Convert_Coords(pointer(Pwgs), pointer(Pmlt), WGS84_TO_EDMAG, cT)
        R, MLat, MLon, MLT = c_double(), c_double(), c_double(), c_double()
        Lgm_EDMAG_to_R_MLAT_MLON_MLT(pointer(Pmlt), pointer(R), pointer(MLat),
                                     pointer(MLon), pointer(MLT), cT)
        return MLT.value

    gsm_ = numpy.asanyarray(gsm)
    if isinstance(dt, datetime.datetime):
        dt_ = numpy.asanyarray([dt])
    else:
        dt_ = numpy.asanyarray(dt)
    if gsm_.ndim == 2:
        if gsm_.shape[1] != 3:
            raise ValueError("Invalid vector shape")
        if gsm_.shape[0] != dt_.size:
            if dt_.size == 1:
                dt_ = dm.dmarray([dt_] * gsm_.shape[0])
            else:
                raise ValueError("Array size mismatch")
        ans = dm.dmarray(numpy.empty(len(dt_)), dtype=numpy.double,
                         attrs={'coord_system': 'EDMAG'})
        # itertools.izip is Python 2 only; zip is already lazy under Python 3
        for ii, (gsm_val, dt_val) in enumerate(zip(gsm_, dt_)):
            ans[ii] = doConv(gsm_val, dt_val)
    else:
        if dt_.size == 1:
            ans = dm.dmarray([doConv(gsm_, dt_)], attrs={'coord_system': 'EDMAG'})
        else:
            ans = dm.dmarray(doConv(gsm_, dt_), attrs={'coord_system': 'EDMAG'})
    return ans
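# Hedged usage sketch for GSMtoMLT (requires the LanlGeoMag bindings this
# module wraps, so it is shown commented; position in Re, GSM coordinates):
#
#   mlt = GSMtoMLT([[-6.6, 0.0, 0.0]], [datetime.datetime(2010, 1, 1)])
#   print(mlt)   # dmarray of MLT hours, attrs={'coord_system': 'EDMAG'}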
def test_HDF5roundtrip2GZIP(self):
    """Data can go to hdf without altering datetimes in the datamodel with compression"""
    a = dm.SpaceData()
    a['foo'] = dm.SpaceData()
    dm.toHDF5(self.testfile, a, compression='gzip')
    newobj = dm.fromHDF5(self.testfile)
    self.assertEqual(a['foo'], newobj['foo'])
    a['bar'] = dm.dmarray([datetime.datetime(2000, 1, 1)])
    dm.toHDF5(self.testfile, a, compression='gzip')
    self.assertEqual(a['bar'], dm.dmarray([datetime.datetime(2000, 1, 1)]))
def test_toRecArray_dtypes2(self):
    '''recarray created from dmarray preserves data types (16-bit+str)'''
    sd = dm.SpaceData()
    sd['x'] = dm.dmarray([1.0, 2.0], dtype=np.float16)
    sd['y'] = dm.dmarray([2, 4], dtype=np.int16)
    sd['str'] = dm.dmarray(['spam', 'eggs'], dtype='|S5')
    ra = dm.toRecArray(sd)
    expected = [sd[key].dtype for key in sd]
    got = [ra.dtype[name] for name in ra.dtype.names]
    self.assertEqual(expected, got)
def singleRundBdt(runname, searchpatt='mag_grid_e20100405-0[89][0-5][0-9][03]0.out',
                  outdir='dBdt_maps', links=True):
    useProject = ccrs.PlateCarree  # projection for plotting, e.g., ccrs.AlbersEqualArea
    plotprojection = useProject(central_longitude=-90.0)
    pdata = {'plotvar': 'dBdth',
             'dataprojection': ccrs.PlateCarree(),
             'plotprojection': plotprojection,
             }
    # add transmission lines from ArcGIS shapefile
    #fname = 'txlines/Transmission_lines.shp'
    #txshapes = list(shapereader.Reader(fname).geometries())
    #albers = ccrs.AlbersEqualArea(central_longitude=-96.0,
    #                              central_latitude=23.0,
    #                              standard_parallels=(29.5, 45.5))
    #txinpc = [plotprojection.project_geometry(ii, albers) for ii in txshapes]
    pdata['shapes'] = None  # txinpc

    rundir = runname[4:]
    globterm = os.path.join(runname, 'RESULTS', rundir, 'GM', searchpatt)
    globterm = os.path.join(runname, searchpatt)  # note: overrides the RESULTS-tree path above
    tstep = 60  # 30 # diff between subsequent files in seconds
    allfiles = sorted(glob.glob(globterm))  # startpoint, forward diff.
    infiles = allfiles  # all except endpoints, central diff
    for idx, fname in enumerate(infiles[1:-1], start=1):
        minusone = bats.MagGridFile(infiles[idx - 1])  #, format='ascii')
        mdata = bats.MagGridFile(fname)  #, format='ascii')
        plusone = bats.MagGridFile(infiles[idx + 1])  #, format='ascii')
        mdata['dBdtn'] = dm.dmarray(gmdtools.centralDiff(minusone['dBn'],
                                                         plusone['dBn'], tstep),
                                    attrs={'units': 'nT/s'})
        mdata['dBdte'] = dm.dmarray(gmdtools.centralDiff(minusone['dBe'],
                                                         plusone['dBe'], tstep),
                                    attrs={'units': 'nT/s'})
        mdata['dBdth'] = dm.dmarray(np.sqrt(mdata['dBdtn']**2 + mdata['dBdte']**2),
                                    attrs={'units': 'nT/s'})
        ax = gmdtools.plotFilledContours(mdata, pdata, addTo=None)
        # windows can't handle colons in filenames...
        isotime = mdata.attrs['time'].isoformat()
        plt.savefig(os.path.join(outdir,
                                 '{0}_{1}.png'.format(pdata['plotvar'],
                                                      isotime.replace(':', ''))),
                    dpi=300)
        plt.close()
    if links:
        gmdtools.makeSymlinks(outdir, kind='dBdt')
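# The dB/dt estimate above is a central difference over 2*tstep seconds; this
# is a minimal numpy version of the operation gmdtools.centralDiff is assumed
# to implement:
import numpy as np

def central_diff(minus_one, plus_one, tstep):
    """(f(t + dt) - f(t - dt)) / (2 * dt), elementwise."""
    return (np.asarray(plus_one) - np.asarray(minus_one)) / (2.0 * tstep)

print(central_diff([0.0, 1.0], [6.0, 5.0], tstep=60))  # [0.05, 0.0333...]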
def calc_deg(self):
    """
    Gitm defaults to radians for lat and lon, which is sometimes difficult
    to use.  This method creates *dLat* and *dLon*, which is lat and lon
    in degrees.
    """
    from numpy import pi
    if "Latitude" in self:
        self["dLat"] = dmarray(self["Latitude"] * 180.0 / pi,
                               attrs={"units": "degrees"})
    if "Longitude" in self:
        self["dLon"] = dmarray(self["Longitude"] * 180.0 / pi,
                               attrs={"units": "degrees"})
def test_creation_dmarray(self):
    """When a dmarray is created it should have attrs empty or not"""
    self.assertTrue(hasattr(self.dat, 'attrs'))
    self.assertEqual(self.dat.attrs['a'], 'a')
    data = dm.dmarray([1, 2, 3])
    self.assertTrue(hasattr(data, 'attrs'))
    self.assertEqual(data.attrs, {})
    data2 = dm.dmarray([1, 2, 3], attrs={'coord': 'GSM'})
    self.assertEqual(data.attrs, {})
    self.assertEqual(data2.attrs, {'coord': 'GSM'})
    data2 = dm.dmarray([1, 2, 3], dtype=float, attrs={'coord': 'GSM'})
    np.testing.assert_almost_equal([1, 2, 3], data2)
def _read(self, starttime):
    '''
    Read ascii line file; should only be called upon instantiation.
    '''
    # Slurp whole file.
    f = open(self.attrs['file'], 'r')
    lines = f.readlines()
    f.close()

    # Determine size of file.
    nTimes = lines.count(lines[0])
    nAlts = int(lines[2].strip())
    self.attrs['nAlt'] = nAlts
    self.attrs['nTime'] = nTimes

    # Start building time array.
    self['time'] = np.zeros(nTimes, dtype=object)

    # Get variable names; pop radius (altitude).
    var = (lines[4].split())[1:-1]
    self._rawvar = var

    # Get altitude, which is constant at all times.
    self['r'] = dmarray(np.zeros(nAlts), {'units': 'km'})
    for i, l in enumerate(lines[5:nAlts + 5]):
        self['r'][i] = float(l.split()[0])

    # Create 2D arrays for data that is time and alt. dependent.
    # Set as many units as possible.
    for v in var:
        self[v] = dmarray(np.zeros((nTimes, nAlts)))
        if v == 'Lat' or v == 'Lon':
            self[v].attrs['units'] = 'deg'
        elif v[0] == 'u':
            self[v].attrs['units'] = 'km/s'
        elif v[0:3] == 'lgn':
            self[v].attrs['units'] = 'log(cm-3)'
        elif v[0] == 'T':
            self[v].attrs['units'] = 'K'
        else:
            self[v].attrs['units'] = None

    # Loop through rest of data to fill arrays.
    for i in range(nTimes):
        t = float((lines[i * (nAlts + 5) + 1].split())[1])
        self['time'][i] = starttime + dt.timedelta(seconds=t)
        for j, l in enumerate(lines[i * (nAlts + 5) + 5:(i + 1) * (nAlts + 5)]):
            parts = l.split()
            for k, v in enumerate(var):
                self[v][i, j] = float(parts[k + 1])
def getSkel(self):
    dat = dm.SpaceData()

    dat['Time'] = dm.dmarray([])
    dat['Time'].attrs['CATDESC'] = 'Start or stop Time'
    dat['Time'].attrs['FIELDNAM'] = 'Time'
    dat['Time'].attrs['LABEL'] = 'Start or stop Time'
    dat['Time'].attrs['SCALE_TYPE'] = 'linear'
    #dat['Time'].attrs['UNITS'] = 'none'
    # dat['Time'].attrs['VALID_MIN'] = datetime.datetime(1990,1,1)
    # dat['Time'].attrs['VALID_MAX'] = datetime.datetime(2029,12,31,23,59,59,999000)
    dat['Time'].attrs['VAR_TYPE'] = 'support_data'
    dat['Time'].attrs['VAR_NOTES'] = 'Time data started or stopped'
    dat['Time'].attrs['DEPEND_0'] = 'Epoch'
    dat['Time'].attrs['FILL_VALUE'] = -1

    dat['Epoch'] = dm.dmarray([])
    dat['Epoch'].attrs['CATDESC'] = 'Default Time'
    dat['Epoch'].attrs['FIELDNAM'] = 'Epoch'
    #dat['Epoch'].attrs['FILLVAL'] = datetime.datetime(2100,12,31,23,59,59,999000)
    dat['Epoch'].attrs['LABEL'] = 'Epoch'
    dat['Epoch'].attrs['SCALE_TYPE'] = 'linear'
    # dat['Epoch'].attrs['VALID_MIN'] = datetime.datetime(1990,1,1)
    # dat['Epoch'].attrs['VALID_MAX'] = datetime.datetime(2029,12,31,23,59,59,999000)
    dat['Epoch'].attrs['VAR_TYPE'] = 'support_data'
    dat['Epoch'].attrs['TIME_BASE'] = '0 AD'
    dat['Epoch'].attrs['MONOTON'] = 'INCREASE'
    dat['Epoch'].attrs['VAR_NOTES'] = 'Epoch at each configuration point'

    dat['Mode'] = dm.dmarray([], dtype=int)
    dat['Mode'][...] = -1
    dat['Mode'].attrs['FIELDNAM'] = 'Mode'
    dat['Mode'].attrs['FILL_VALUE'] = -1
    dat['Mode'].attrs['LABEL'] = 'FIRE Mode'
    dat['Mode'].attrs['SCALE_TYPE'] = 'linear'
    dat['Mode'].attrs['VALID_MIN'] = 0
    dat['Mode'].attrs['VALID_MAX'] = 1
    dat['Mode'].attrs['VAR_TYPE'] = 'support_data'
    dat['Mode'].attrs['VAR_NOTES'] = 'Is the line FIRE on (=1) or FIRE off (=0)'

    dat['Duration'] = dm.dmarray([], dtype=float)
    dat['Duration'][...] = -1
    dat['Duration'].attrs['FIELDNAM'] = 'Duration'
    dat['Duration'].attrs['FILL_VALUE'] = -1
    dat['Duration'].attrs['LABEL'] = 'FIRE Duration'
    dat['Duration'].attrs['SCALE_TYPE'] = 'linear'
    dat['Duration'].attrs['VALID_MIN'] = 0
    dat['Duration'].attrs['VALID_MAX'] = 100000
    dat['Duration'].attrs['VAR_TYPE'] = 'support_data'
    dat['Duration'].attrs['VAR_NOTES'] = 'Duration of the on or off'

    return dat
def readIMP8plasmafile(fname):
    """Read an IMP-8 fine-resolution plasma file, e.g. from
    ftp://space.mit.edu/pub/plasma/imp/fine_res/1989/

    File columns (from the provider's header):
    yr doy hh mm ss sc decimal yr rg md xse yse zse ysm zsm
    speed thermal speed density E/W angle N/S angle
    mom nonlin mom nonlin mom nonlin mom best thresh threshs
    """
    header = []
    with open(fname, 'r') as fh:
        while True:
            pos = fh.tell()
            line = fh.readline().strip()
            if not line:
                # empty line, skip
                continue
            if not line[0].isdigit():
                # this is header, save it
                header.append(line)
            else:
                # first line of data, roll back to start and pass to numpy
                fh.seek(pos)
                break
        data = np.loadtxt(fh)

    def toDate(yr, doy, hh, mm, ss):
        MM, DD = spt.doy2date(yr, doy)
        dates = dm.dmfilled(len(yr), fillval=None, dtype=object)
        for idx, (mon, day) in enumerate(zip(MM, DD)):
            dates[idx] = dt.datetime(yr[idx], mon, day, hh[idx], mm[idx], ss[idx])
        return dates

    region = data[:, 7]
    outdata = dm.SpaceData(attrs={'header': header, 'fname': fname})
    outdata['time'] = toDate(data[:, 0].astype(int), data[:, 1].astype(int),
                             data[:, 2].astype(int), data[:, 3].astype(int),
                             data[:, 4].astype(int))
    outdata['region'] = dm.dmarray(region)
    outdata['pos_gse'] = dm.dmarray(data[:, 9:12], attrs={'coord_sys': 'gse'})
    outdata['pos_gsm'] = dm.dmfilled(outdata['pos_gse'].shape, fillval=0,
                                     dtype=float, attrs={'coord_sys': 'gsm'})
    outdata['pos_gsm'][:, 0] = data[:, 9]
    outdata['pos_gsm'][:, 1:] = data[:, 12:14]
    outdata['speed'] = dm.dmarray(data[:, 14],
                                  attrs={'description': 'speed from moments'})
    # outdata['speed'][region > 2] = np.nan  # region 3 is sheath
    outdata['speed_nl'] = dm.dmarray(data[:, 15])
    vmask = outdata['speed_nl'] >= 9000  # values >= 9000 are fill
    # outdata['speed_nl'][region > 2] = np.nan  # region 3 is sheath
    outdata['speed_nl'][vmask] = np.nan  # blank out fill values
    outdata['n_dens'] = dm.dmarray(data[:, 18],
                                   attrs={'description': 'number density from moments'})
    outdata['n_dens_nl'] = dm.dmarray(data[:, 19])
    outdata['temp'] = 60.5 * dm.dmarray(data[:, 16])**2
    outdata['temp_nl'] = 60.5 * dm.dmarray(data[:, 17])**2
    outdata['data'] = data
    return outdata
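# Where the 60.5 factor above plausibly comes from: T = m_p * w**2 / (2 * k_B)
# for a proton thermal speed w in km/s. This is a consistency check, not taken
# from the original source:
m_p, k_B = 1.6726e-27, 1.3807e-23   # SI units
print(m_p / (2.0 * k_B) * 1e6)      # ~60.6 K per (km/s)**2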
def _read(self, filename, starttime):
    '''
    Read, parse, and load data into data structure.
    Should only be called by __init__.
    '''
    from re import match, search
    from spacepy.pybats import parse_tecvars

    # Save time (starttime + run time).
    # Run time must be found in file name (raw strings avoid invalid
    # escape-sequence warnings in the regex).
    m = search(r'Time(\d+)\.dat', filename)
    if m:
        self.attrs['runtime'] = int(m.group(1))
    else:
        self.attrs['runtime'] = 0
    self.attrs['time'] = starttime + dt.timedelta(seconds=self.attrs['runtime'])

    # Open file
    f = open(filename, 'r')

    # Parse header.
    varlist = parse_tecvars(f.readline())
    m = search(r'.*I\=\s*(\d+)\,\s*J\=\s*(\d+)', f.readline()).groups()
    nLat, nLon = int(m[0]), int(m[1])
    self.attrs['nLat'] = nLat
    self.attrs['nLon'] = nLon

    # Create arrays.
    for k, u in varlist:
        self[k] = dmarray(np.zeros((nLat * nLon)), attrs={'units': u})
    for i in range(nLat * nLon):
        #for j in range(nLon):
        parts = f.readline().split()
        for k, (key, u) in enumerate(varlist):
            self[key][i] = float(parts[k])

    # Lat, CoLat, and Lon:
    #self['lon'] = dmarray(np.pi/2 - np.arctan(self['y']/self['x']),
    #                      attrs={'units','deg'})*180.0/np.pi
    #self['lon'][self['x']==0.0] = 0.0
    xy = np.sqrt(self['x']**2 + self['y']**2) + 0.0000001
    self['lon'] = np.arcsin(self['y'] / xy)
    self['lat'] = dmarray(np.arcsin(self['z']), {'units': 'deg'}) * 180.0 / np.pi
    self['colat'] = 90.0 - self['lat']

    f.close()
def getSolarProtonSpectra(norm=3.20e7, gamma=-0.96, E0=15.0,
                          Emin=.1, Emax=600, nsteps=100):
    r'''Returns a SpaceData with energy and fluence spectra of solar particle events

    The formulation follows that of Ellison and Ramaty,
    ApJ 298: 400-408, 1985:

        dJ/dE = K E^{\gamma} \exp(-E/E_0)

    and the default values are for the 10/16/2003 SEP event of:
    Mewaldt, R. A., et al. (2005), J. Geophys. Res., 110, A09S18,
    doi:10.1029/2005JA011038.

    Other Parameters
    ================
    norm : float
        Normalization factor for the intensity of the SEP event
    gamma : float
        Power law index (negative for a falling spectrum)
    E0 : float
        Exponential scaling factor
    Emin : float
        Minimum energy for fit
    Emax : float
        Maximum energy for fit
    nsteps : int
        The number of log spaced energy steps to return

    Returns
    =======
    data : dm.SpaceData
        SpaceData with the energy and fluence values
    '''
    E = tb.logspace(Emin, Emax, nsteps)
    fluence = norm * E**(gamma) * np.exp(-E / E0)
    ans = dm.SpaceData()
    ans['Energy'] = dm.dmarray(E)
    ans['Energy'].attrs = {'UNITS': 'MeV',
                           'DESCRIPTION': 'Particle energy per nucleon'}
    ans['Fluence'] = dm.dmarray(fluence)
    ans['Fluence'].attrs = {'UNITS': 'cm^{-2} sr^{-1} (MeV/nuc)^{-1}',
                            'DESCRIPTION': 'Fluence spectra fit to the model'}
    return ans
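# A quick numpy check of the spectral form used above, independent of
# spacepy.toolbox.logspace (same default parameters as the function):
import numpy as np

E = np.logspace(np.log10(0.1), np.log10(600), 100)   # MeV
fluence = 3.20e7 * E**(-0.96) * np.exp(-E / 15.0)
print(E[0], fluence[0])    # low-energy end dominates the fluence
print(E[-1], fluence[-1])  # exponential rollover suppresses the tail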
def test_add_data(self):
    """run it and check that add_data works correctly"""
    data = dm.SpaceData(xval=dm.dmarray(np.arange(3)),
                        yval=dm.dmarray(np.arange(3)),
                        zval=dm.dmarray(np.arange(3)))
    xbins = np.arange(-0.5, 3.5, 2.0)
    ybins = np.arange(-0.5, 3.5, 2.0)
    a = Spectrogram(self.data, variables=self.kwargs['variables'], extended_out=True)
    count = a['spectrogram']['count'][:].copy()
    sm = a['spectrogram']['sum'][:].copy()
    spect = a['spectrogram']['spectrogram'][:].copy()
    a.add_data(self.data)
    # add the same data back; sum and count will double, spectrogram stays the same
    np.testing.assert_almost_equal(a['spectrogram']['count'].filled(),
                                   (count * 2).filled())
    np.testing.assert_almost_equal(a['spectrogram']['sum'], sm * 2)
    np.testing.assert_almost_equal(a['spectrogram']['spectrogram'], spect)
def __init__(self, inlst):
    # zip(*inlst)[0] is not subscriptable under Python 3; unpack instead
    dt, vals = zip(*inlst)
    data = np.hstack(vals).reshape((-1, 2))
    dat = dm.SpaceData()

    dat['Burst1'] = dm.dmarray(data[:, 0])
    dat['Burst1'].attrs['CATDESC'] = 'Burst parameter ch1'
    dat['Burst1'].attrs['FIELDNAM'] = 'Burst'
    dat['Burst1'].attrs['LABLAXIS'] = 'Burst Parameter ch1'
    dat['Burst1'].attrs['SCALETYP'] = 'linear'
    #dat['time'].attrs['UNITS'] = 'none'
    dat['Burst1'].attrs['UNITS'] = ''
    dat['Burst1'].attrs['VALIDMIN'] = 0
    dat['Burst1'].attrs['VALIDMAX'] = 2**4 - 1
    dat['Burst1'].attrs['VAR_TYPE'] = 'data'
    dat['Burst1'].attrs['VAR_NOTES'] = 'Burst parameter compressed onboard'
    dat['Burst1'].attrs['DEPEND_0'] = 'Epoch'
    dat['Burst1'].attrs['FILLVAL'] = 2**8 - 1

    dat['Burst2'] = dm.dmarray(data[:, 1])
    dat['Burst2'].attrs['CATDESC'] = 'Burst parameter ch2'
    dat['Burst2'].attrs['FIELDNAM'] = 'Burst'
    dat['Burst2'].attrs['LABLAXIS'] = 'Burst Parameter ch2'
    dat['Burst2'].attrs['SCALETYP'] = 'linear'
    #dat['time'].attrs['UNITS'] = 'none'
    dat['Burst2'].attrs['UNITS'] = ''
    dat['Burst2'].attrs['VALIDMIN'] = 0
    dat['Burst2'].attrs['VALIDMAX'] = 2**4 - 1
    dat['Burst2'].attrs['VAR_TYPE'] = 'data'
    dat['Burst2'].attrs['VAR_NOTES'] = 'Burst parameter compressed onboard'
    dat['Burst2'].attrs['DEPEND_0'] = 'Epoch'
    dat['Burst2'].attrs['FILLVAL'] = 2**8 - 1

    dat['Epoch'] = dm.dmarray(dt)
    dat['Epoch'].attrs['CATDESC'] = 'Default Time'
    dat['Epoch'].attrs['FIELDNAM'] = 'Epoch'
    #dat['Epoch'].attrs['FILLVAL'] = datetime.datetime(2100,12,31,23,59,59,999000)
    dat['Epoch'].attrs['LABLAXIS'] = 'Epoch'
    dat['Epoch'].attrs['SCALETYP'] = 'linear'
    dat['Epoch'].attrs['UNITS'] = 'ms'
    dat['Epoch'].attrs['VALIDMIN'] = datetime.datetime(1990, 1, 1)
    dat['Epoch'].attrs['VALIDMAX'] = datetime.datetime(2029, 12, 31, 23, 59, 59, 999000)
    dat['Epoch'].attrs['VAR_TYPE'] = 'support_data'
    dat['Epoch'].attrs['TIME_BASE'] = '0 AD'
    dat['Epoch'].attrs['MONOTON'] = 'INCREASE'
    dat['Epoch'].attrs['VAR_NOTES'] = 'Epoch at each configuration point'

    self.data = dat
def _read(self, lines, starttime):
    '''
    Read all ascii line files; should only be called upon instantiation.
    '''
    from glob import glob

    self['files'] = glob(lines)

    # Read first file; use it to set up all arrays.
    l1 = Line(self['files'][0], starttime=starttime)
    nA = l1.attrs['nAlt']
    nT = l1.attrs['nTime']
    nF = len(self['files'])
    self['r'] = l1['r']
    self.attrs['nAlt'] = nA
    self.attrs['nTime'] = nT
    self.attrs['nFile'] = nF
    self['time'] = l1['time']

    # Create all arrays.
    for v in l1._rawvar:
        self[v] = dmarray(np.zeros((nF, nT, nA)))
        self[v][0, :, :] = l1[v]
        if v == 'Lat' or v == 'Lon':
            self[v].attrs['units'] = 'deg'
        elif v[0] == 'u':
            self[v].attrs['units'] = 'km/s'
        elif v[0:3] == 'lgn':
            self[v].attrs['units'] = 'log(cm-3)'
        elif v[0] == 'T':
            self[v].attrs['units'] = 'K'
        else:
            self[v].attrs['units'] = None

    # Place data into arrays.
    for i, f in enumerate(self['files'][1:]):
        l = Line(f)
        for v in l1._rawvar:
            self[v][i + 1, :, :] = l[v]

    # Get non-log densities.
    logVars = []
    for v in self:
        if v[:3] == 'lgn':
            logVars.append(v)
    for v in logVars:
        self[v[2:]] = dmarray(10.**self[v], {'units': r'$cm^{-3}$'})
def test_more_attrs(self):
    """more attrs are allowed if they are predefined"""
    a = dm.dmarray([1, 2, 3])
    a.Allowed_Attributes = a.Allowed_Attributes + ['blabla']
    a.blabla = {}
    a.blabla['foo'] = 'you'
    self.assertEqual(a.blabla['foo'], 'you')
def runQs():
    # make figure
    fig = plt.figure(figsize=(17, 21))

    # set up test runs; 1./24 avoids Python 2 integer division giving zero
    nvals, rvals = 5, [2, 3, 4, 5, 6]
    dates = spt.tickrange('2001-04-22T12:00:00', '2001-04-23T23:00:00', 1. / 24)
    alpha = 45

    # loop over radial positions, dates and quality flags
    for rval in rvals:
        # [[]] * nvals would alias one list nvals times; build independent lists
        Lstar = [[] for _ in range(nvals)]
        for date, qual in itertools.product(dates.UTC, range(nvals)):
            pos = dm.dmarray([-1 * rval, 0, 0], attrs={'sys': 'SM'})
            data = lgmpy.get_Lstar(pos, date, alpha=alpha, coord_system='SM',
                                   Bfield='Lgm_B_cdip', LstarThresh=20.0,
                                   extended_out=True, LstarQuality=qual)
            try:
                Lstar[qual].extend(data[alpha]['Lstar'])
            except TypeError:
                Lstar[qual].append(data[alpha]['Lstar'].tolist())
        print('Did [-%d,0,0] for all qualities' % rval)

        # make plots
        fstr = '%d1%d' % (len(rvals), rval - rvals[0] + 1)
        ax = fig.add_subplot(fstr)
        ax.boxplot(Lstar)
        #ax = plt.gca()
        ax.set_title('LGM - Centred dipole [-%d, 0, 0]$_{SM}$; PA=%d$^{o}$'
                     % (rval, alpha))
        ax.set_ylabel('L* (LanlGeoMag)')
        ax.set_xlabel('Quality Flag')
        ax.set_xticklabels([str(n) for n in range(5)])
        tb.savepickle('lgm_cdip_lstar%d_alpha%d.pkl' % (rval, alpha),
                      {'Lstar': Lstar})
        plt.ylim([rval - 0.04, rval + 0.04])
    plt.savefig('lgm_cdip_verify%d.png' % alpha, dpi=300)
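# The list-construction fix above matters: [[]] * n aliases one list n times,
# so every quality flag would have shared a single Lstar accumulator.
shared = [[]] * 3
shared[0].append(1)
print(shared)        # [[1], [1], [1]] -- all the same object
independent = [[] for _ in range(3)]
independent[0].append(1)
print(independent)   # [[1], [], []]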
def calc_tec(self):
    '''
    A routine to calculate the 2D VTEC.
    To perform these calculations, electron density ("e-") must be one of
    the available data types.
    '''
    import scipy.integrate as integ
    from scipy.interpolate import interp1d

    if 'e-' in self:
        self['VTEC'] = dmarray(self['e-'] * 1.0e-16,
                               attrs={'units': 'TECU', 'scale': 'linear',
                                      'name': 'Vertical TEC'})
        for ilon in range(self.attrs['nLon']):
            for ilat in range(self.attrs['nLat']):
                # Integrate electron density over altitude, not including
                # ghost cells.  Note: even='avg' must be passed by keyword;
                # the third positional argument of simps is dx.
                vtec = integ.simps(self['VTEC'][ilon, ilat, 2:-2],
                                   self['Altitude'][ilon, ilat, 2:-2],
                                   even='avg')
                self['VTEC'][ilon, ilat, :] = vtec
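# Unit sketch for the TEC integral above (assuming GITM electron density in
# m^-3 and altitude in m, which makes the 1.0e-16 scaling yield TECU, since
# 1 TECU = 1e16 electrons per m^2):
import numpy as np

alt = np.linspace(100e3, 1000e3, 200)             # altitude grid, m
ne = 1e12 * np.exp(-((alt - 300e3) / 150e3)**2)   # toy density profile, m^-3
print(np.trapz(ne * 1e-16, alt), 'TECU')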
def calc_lt(self):
    '''
    Gitm defaults to universal time.  Compute local time from date and
    longitude.
    '''
    from numpy import pi
    import math

    ut = (self['time'].hour * 3600 + self['time'].minute * 60 +
          self['time'].second + self['time'].microsecond * 1e-6) / 3600.0
    self['LT'] = dmarray(ut + self['Longitude'] * 12.0 / pi,
                         attrs={'units': 'hours', 'scale': 'linear',
                                'name': 'Local Time'})

    # Because datetime won't do lists or arrays
    if dmarray.max(self['LT']) >= 24.0:
        for i in range(self.attrs['nLon']):
            # All local times will be the same for each lat/alt
            # in the same longitude index
            ltmax = dmarray.max(self['LT'][i])
            if ltmax >= 24.0:
                self['LT'][i] -= 24.0 * math.floor(ltmax / 24.0)
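# The conversion above in isolation: longitude in radians maps to a local
# time offset of 12/pi hours per radian (24 h per 2*pi radians).
from numpy import pi

ut = 18.5                             # universal time, hours
lon = pi                              # 180 degrees east, radians
print((ut + lon * 12.0 / pi) % 24.0)  # 6.5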
def calc_deg(self):
    """
    Gitm defaults to radians for lat and lon, which is sometimes difficult
    to use.  This routine leaves the existing latitude and longitude arrays
    intact and creates *dLat* and *dLon*, which contain the lat and lon in
    degrees.
    """
    from numpy import pi
    self["dLat"] = dmarray(self["Latitude"] * 180.0 / pi,
                           attrs={"units": "degrees", "scale": "linear",
                                  "name": "Latitude"})
    self["dLon"] = dmarray(self["Longitude"] * 180.0 / pi,
                           attrs={"units": "degrees", "scale": "linear",
                                  "name": "Longitude"})
def setUp(self):
    super(converterTestsCDF, self).setUp()
    self.SDobj = dm.SpaceData(attrs={'global': 'test'})
    self.SDobj['var'] = dm.dmarray([1, 2, 3], attrs={'a': 'a'})
    self.testdir = tempfile.mkdtemp()
    self.testfile = os.path.join(self.testdir, 'test.cdf')
    warnings.simplefilter('error', dm.DMWarning)
def calc_flux(self):
    '''
    Calculate flux in units of #/cm2/s.
    Variables saved as self[species+'Flux'].
    '''
    for s in ['H', 'O', 'He', 'e']:
        self[s + 'Flux'] = dmarray(1000 * self['n' + s] * self['u' + s],
                                   {'units': '$cm^{-2}s^{-1}$'})
def test_addAttribute(self):
    """addAttribute should work"""
    a = dm.dmarray([1, 2, 3])
    a.addAttribute('bla')
    self.assertEqual(a.bla, None)
    a.addAttribute('bla2', {'foo': 'bar'})
    self.assertEqual(a.bla2['foo'], 'bar')
    self.assertRaises(NameError, a.addAttribute, 'bla2')