Example #1
def FIRE_HiRes_L1_L2(datafile, ephemfile):
    full_data = dm.readJSONheadedASCII(datafile)
    ephem = dm.readJSONheadedASCII(ephemfile)
    data = Trim_data_file(full_data, ephem)
    labels = ephem.keys()
    ephem_fields = ['Lsimple', 'CDMAG_MLT']
    dt = spt.Ticktock(data['Epoch']).TAI
    et = spt.Ticktock(ephem['DateTime']).TAI
    for i in range(len(ephem_fields)):
        print(ephem_fields[i])
        y = ephem[ephem_fields[i]]
        nx = tb.interpol(dt, et, y)
        data[ephem_fields[i]] = dm.dmarray(nx)
    ephem_lat = ephem['Rgeod_LatLon'][:,0]
    ephem_lon = ephem['Rgeod_LatLon'][:,1]
    nx = tb.interpol(dt, et, ephem_lat)
    data['Lat'] = dm.dmarray(nx)
    nx = tb.interpol(dt, et, ephem_lon)
    data['Lon'] = dm.dmarray(nx)
    n_lines = len(data['Epoch'])
    eflux = np.zeros((n_lines, 12))  # np.zeros takes the shape as a single tuple
    day = ephem['DateTime'][0][0:10]
    outfile = datafile[:-23] + day + '-HiRes_L2.txt'
    dm.toJSONheadedASCII(outfile, data)
    return data
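Trim_data_file is not defined in this listing; a minimal sketch of what it presumably does, restricting the FIRE records to the time span covered by the ephemeris, might look like the following. The key names and the TAI comparison are assumptions based on how it is called above, not the actual FIRE implementation.

import numpy as np
import spacepy.datamodel as dm
import spacepy.time as spt

def Trim_data_file(full_data, ephem):
    # Keep only the records whose 'Epoch' falls inside the ephemeris span.
    dt = np.asarray(spt.Ticktock(full_data['Epoch']).TAI)
    et = np.asarray(spt.Ticktock(ephem['DateTime']).TAI)
    keep = (dt >= et[0]) & (dt <= et[-1])
    trimmed = dm.SpaceData(attrs=full_data.attrs)
    for key in full_data:
        arr = full_data[key]
        # record-varying variables get trimmed, ancillary ones are copied
        trimmed[key] = arr[keep] if len(arr) == len(keep) else arr
    return trimmed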
Example #2
 def test_readJSONheadedASCII(self):
     """readJSONheadedASCII should read the test file"""
     dat = dm.readJSONheadedASCII(self.filename)
     keys = ['PerigeePosGeod', 'S_sc_to_pfn', 'S_pfs_to_Bmin', 'Pfs_gsm',
             'Pfn_ED_MLAT', 'ED_R', 'Dst', 'DateTime', 'DOY', 'ED_MLON',
             'IntModel', 'ApogeePosGeod', 'CD_MLON', 'S_sc_to_pfs',
             'GpsTime', 'JulianDate', 'M_ref', 'ED_MLT', 'Pfs_ED_MLAT',
             'Bfs_geo', 'Bm', 'Pfn_CD_MLON', 'CD_MLAT', 'Pfs_geo',
             'Rsm', 'Pmin_gsm', 'Rgei', 'Rgsm', 'Pfs_CD_MLAT', 'S_total',
             'Rgeod_Height', 'Date', 'Alpha', 'M_igrf', 'Pfs_CD_MLT',
             'ED_MLAT', 'CD_R', 'PerigeeTimes', 'UTC', 'Pfn_ED_MLT',
             'BoverBeq', 'Lsimple', 'Lstar', 'I', 'DipoleTiltAngle',
             'K', 'Bmin_gsm', 'S_Bmin_to_sc', 'Bfs_gsm', 'L',
             'ApogeeTimes', 'ExtModel', 'Kp', 'Pfs_geod_LatLon',
             'MlatFromBoverBeq', 'Pfn_gsm', 'Loss_Cone_Alpha_n', 'Bfn_geo',
             'Pfn_CD_MLAT', 'Rgeod_LatLon', 'Pfs_ED_MLT', 'Pfs_CD_MLON',
             'Bsc_gsm', 'Pfn_geod_Height', 'Lm_eq', 'Rgse',
             'Pfn_geod_LatLon', 'CD_MLT', 'FieldLineType', 'Pfn_CD_MLT',
             'Pfs_geod_Height', 'Rgeo', 'InvLat_eq', 'M_used',
             'Loss_Cone_Alpha_s', 'Bfn_gsm', 'Pfn_ED_MLON', 'Pfn_geo',
             'InvLat', 'Pfs_ED_MLON']
     if str is bytes:
         keys = [unicode(k) for k in keys]
     # make sure data has all the keys and no more or less
     for k in dat:
         self.assertTrue(k in keys)
         ind = keys.index(k)
         del keys[ind]
     self.assertEqual(len(keys), 0)
     dat = dm.readJSONheadedASCII(self.filename, convert=True)
     np.testing.assert_array_equal(dat['DateTime'], [datetime.datetime(2013, 2, 18, 0, 0), datetime.datetime(2013, 2, 18, 0, 5)])
Example #3
 def test_toJSONheadedASCII(self):
     """Write known datamodel to JSON-headed ASCII and ensure it has right stuff added"""
     a = dm.SpaceData()
     a.attrs['Global'] = 'A global attribute'
     a['Var1'] = dm.dmarray([1, 2, 3, 4, 5],
                            attrs={'Local1': 'A local attribute'})
     a['Var2'] = dm.dmarray([[8, 9], [9, 1], [3, 4], [8, 9], [7, 8]])
     a['MVar'] = dm.dmarray([7.8], attrs={'Note': 'Metadata'})
     t_file = tempfile.NamedTemporaryFile(delete=False)
     t_file.close()
     dm.toJSONheadedASCII(t_file.name,
                          a,
                          depend0='Var1',
                          order=['Var1', 'Var2'])
     dat2 = dm.readJSONheadedASCII(t_file.name)
     #test global attr
     self.assertTrue(a.attrs == dat2.attrs)
     #test that metadata is back and all original keys are present
     for key in a['MVar'].attrs:
         self.assertTrue(key in dat2['MVar'].attrs)
     np.testing.assert_array_equal(a['MVar'], dat2['MVar'])
     #test vars are right
     np.testing.assert_almost_equal(a['Var1'], dat2['Var1'])
     np.testing.assert_almost_equal(a['Var2'], dat2['Var2'])
     #test for added dimension and start col
     self.assertTrue(dat2['Var1'].attrs['DIMENSION'] == [1])
     self.assertTrue(dat2['Var2'].attrs['DIMENSION'] == [2])
     os.remove(t_file.name)
Example #4
 def test_toHTML(self):
     """toHTML should give known output"""
     t_file = tempfile.NamedTemporaryFile(delete=False)
     t_file.close()
     dat = dm.readJSONheadedASCII(self.filename)
     dm.toHTML(t_file.name, dat, attrs=['DESCRIPTION', 'UNITS', 'ELEMENT_LABELS'], varLinks=True)
     if sys.platform == 'win32': #Different line endings
         expected = 12916 if str is bytes else 12892
     else:
         expected = 12834 if str is bytes else 12810 #no u on unicode strings
     self.assertEqual(expected, os.path.getsize(t_file.name)) # not the best test but I am lazy
     os.remove(t_file.name)
Example #5
 def test_writeJSONMetadata(self):
     """reading metadata should give same keys as original datamodel"""
     dat = dm.readJSONMetadata(self.filename)
     # make sure data has all the keys and no more or less
     t_file = tempfile.NamedTemporaryFile(delete=False)
     t_file.close()
     dm.writeJSONMetadata(t_file.name, dat)
     dat2 = dm.readJSONheadedASCII(t_file.name)
     os.remove(t_file.name)
     keylist1 = sorted(dat.keys())
     keylist2 = sorted(dat2.keys())
     self.assertTrue(keylist1==keylist2)
     #now test that values in some metadata are identical
     self.assertTrue((dat['PerigeePosGeod'] == dat2['PerigeePosGeod']).all())
Example #6
def FIRE_Context_L1_L2(datafile, ephemfile):
    full_data = dm.readJSONheadedASCII(datafile)
    ephem = dm.readJSONheadedASCII(ephemfile)
    meta = dm.readJSONheadedASCII(ephemfile)
    data = Trim_data_file(full_data, ephem)
    labels = ephem.keys()
    ephem_fields = test_ephem_list
    dt = spt.Ticktock(data['Epoch']).TAI
    et = spt.Ticktock(ephem['DateTime']).TAI
    for i in range(len(ephem_fields)):
        dim = np.size(ephem[ephem_fields[i]][0])
        print(ephem_fields[i], dim)
        nx = np.empty([len(dt),dim])
        if dim > 1:
            for j in range(dim):
                y = ephem[ephem_fields[i]][:,j]
                nx[:,j] = tb.interpol(dt, et, y)
            data[ephem_fields[i]] = dm.dmarray(nx, attrs = meta[ephem_fields[i]].attrs)
        else:
            y = ephem[ephem_fields[i]] 
            nx = tb.interpol(dt, et, y)
            data[ephem_fields[i]] = dm.dmarray(nx, attrs = meta[ephem_fields[i]].attrs)
    col = deepcopy(data['Context'][:,0])
    sur = deepcopy(data['Context'][:,1])
    despike(col, 250, 10)
    despike(sur, 250, 10)
    col = (col/(6*9))*1033
    sur = (sur/(6*23))*772
    data['col'] = dm.dmarray(col, attrs = {'Description': 'Collimated Detector Energy Flux', 'SCALE_TYPE': 'log'})
    data['sur'] = dm.dmarray(sur, attrs = {'Description': 'Surface Detector Energy Flux', 'SCALE_TYPE': 'log'})
    day = ephem['DateTime'][0][0:10]
    outfile = datafile[:-25] + day + '-Context_L2.txt'
    order = ['Epoch', 'col', 'sur', 'Context']
#    order = ['Epoch', 'Context']
    order.extend(ephem_fields)
    dm.toJSONheadedASCII(outfile, data, order=order)
    return data
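despike is another helper not shown in this listing; judging from the call despike(col, 250, 10) it cleans the counter data in place. A sketch under that assumption follows; the interpretation of the two parameters (deviation threshold and running-median window) is a guess.

import numpy as np

def despike(y, threshold, width):
    # Hypothetical stand-in for the despike helper used above; the real
    # FIRE routine may differ.  Replaces points that deviate from a
    # `width`-point running median by more than `threshold`, in place.
    half = width // 2
    for i in range(len(y)):
        lo, hi = max(0, i - half), min(len(y), i + half + 1)
        med = np.median(y[lo:hi])
        if abs(y[i] - med) > threshold:
            y[i] = med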
Example #7
 def test_toHTML(self):
     """toHTML should give known output"""
     t_file = tempfile.NamedTemporaryFile(delete=False)
     t_file.close()
     dat = dm.readJSONheadedASCII(self.filename)
     dm.toHTML(t_file.name, dat, attrs=['DESCRIPTION', 'UNITS', 'ELEMENT_LABELS'], varLinks=True)
     if sys.platform == 'win32':
         expected = 12916 #different line-endings
     else:
         if str is bytes:
             expected = 12834
         else:
             expected = 12810 #no u on the unicode strings
     self.assertEqual(expected, os.path.getsize(t_file.name)) # not the best test but I am lazy
     os.remove(t_file.name)
Example #8
def readfiles(files):
    """
    read in the files and store the data we need to data
    """
    global data # global means we can edit this
    files = set(files) # make this a unique set of files so the same file is not read twice
    for f in files:
        try:
            dat = dm.readJSONheadedASCII(f)
        except (IOError, ValueError):
            print('File {0} is not valid JSONheadedASCII, skipped'.format(f))
            continue
        if not isHires(dat) and not isConfig(dat):
            print('File {0} is not hires or config, skipped'.format(f))
            continue # not the kind of file we want
        ## file is now for sure wanted store its info to data
        data[f] = dat
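isHires and isConfig are also not part of this listing. Plausible one-line predicates are sketched below; the reg-prefix check is grounded in the reg00..reg15 variables plotted in Example #11, while the HiRes variable name is purely a guess.

def isHires(dat):
    # hypothetical: assumes HiRes products carry an 'Epoch' column plus
    # high-rate counter variables (name guessed)
    return 'Epoch' in dat and 'hr0' in dat

def isConfig(dat):
    # config dumps carry the register variables reg00..reg15 that
    # Example #11 plots
    return any(k.startswith('reg') for k in dat)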
Example #9
def parseData_Times(fname):
    """
    TODO: if we don't get at least a page of times, a data type is likely missing, so probably don't ask for it

    TODO: if the duration of the data times is less than 90 minutes then something happened, making the data in that segment suspect
    """
    if fname is None:
        return None
    data = dm.readJSONheadedASCII(fname, convert={'Epoch':lambda x: dup.parse(x, ignoretz=True),
                                                  'Time':lambda x: dup.parse(x, ignoretz=True)})
    t1 = np.asarray([v.replace(microsecond=0) for v in data['Epoch']])
    t2 = np.asarray([v.replace(microsecond=0) for v in data['Time']])

    outdat = dm.SpaceData()
    # use the builtin bool: np.bool was removed from recent NumPy
    outdat['On']  = np.asarray( [t1[data['Mode'].astype(bool)],  t2[data['Mode'].astype(bool)]]).T
    outdat['Off'] = np.asarray( [t1[~data['Mode'].astype(bool)], t2[~data['Mode'].astype(bool)]]).T
    return outdat
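The convert argument above maps column names to parser callables, so the ISO strings come back as datetime objects at read time. A usage sketch (the file name is hypothetical):

import dateutil.parser as dup
import spacepy.datamodel as dm

# each key in `convert` names a column and runs it through the callable
dat = dm.readJSONheadedASCII('fire_times.txt',
                             convert={'Epoch': lambda x: dup.parse(x, ignoretz=True),
                                      'Time': lambda x: dup.parse(x, ignoretz=True)})
onoff = parseData_Times('fire_times.txt')  # SpaceData with 'On'/'Off' intervals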
Example #10
 def test_toJSONheadedASCII_method(self):
     """Write known datamodel to JSON-headed ASCII and ensure it has right stuff added"""
     a = dm.SpaceData()
     a.attrs['Global'] = 'A global attribute'
     a['Var1'] = dm.dmarray([1,2,3,4,5], attrs={'Local1': 'A local attribute'})
     a['Var2'] = dm.dmarray([[8,9],[9,1],[3,4],[8,9],[7,8]])
     a['MVar'] = dm.dmarray([7.8], attrs={'Note': 'Metadata'})
     t_file = tempfile.NamedTemporaryFile(delete=False)
     t_file.close()
     a.toJSONheadedASCII(t_file.name, depend0='Var1', order=['Var1','Var2'])
     dat2 = dm.readJSONheadedASCII(t_file.name)
     #test global attr
     self.assertTrue(a.attrs==dat2.attrs)
     #test that metadata is back and all original keys are present
     for key in a['MVar'].attrs:
         self.assertTrue(key in dat2['MVar'].attrs)
     np.testing.assert_array_equal(a['MVar'], dat2['MVar'])
     #test vars are right
     np.testing.assert_almost_equal(a['Var1'], dat2['Var1'])
     np.testing.assert_almost_equal(a['Var2'], dat2['Var2'])
     #test for added dimension and start col
     self.assertTrue(dat2['Var1'].attrs['DIMENSION']==[1])
     self.assertTrue(dat2['Var2'].attrs['DIMENSION']==[2])
     os.remove(t_file.name)
Example #11
        if not os.path.isfile(f):
            parser.error("File {0} does not exist".format(f))


#==============================================================================
# deal with the filetype options
#==============================================================================

    for f in infiles:
        try:
            tp = determineFileType(f)
        except (ValueError, NotImplementedError):
            # could not determine the type, die
            parser.error("Could not determine the file type: {0}".format(f))

        dat = dm.readJSONheadedASCII(f)
        dat['Epoch'][...] = [dup.parse(v) for v in dat['Epoch'][...]]

        fig = plt.figure()
        ax = fig.add_subplot(111)
        imname = f + '_ql_{0}.png'.format(datetime.datetime.now()).replace(' ', 'T').replace(':', '-')

        if tp == 'configfile':
            for var in range(16):
                ax.plot(dat['Epoch'], dat['reg{0:02}'.format(var)], label='reg{0:02}'.format(var))
            ax.set_ylim((-10, 300))
            ax.set_xlim((ax.get_xlim()[0], ax.get_xlim()[1]+(ax.get_xlim()[1]-ax.get_xlim()[0])*0.45))
            ax.legend(loc='upper right')
            fig.savefig(imname)
            plt.close()
        elif tp == 'mbpfile':
            pass  # (branch truncated in this listing)
Example #12
      try:
        date1=datetime.datetime.strftime(dt1, '%Y%m%d')
        print(date1)
        # to wake up the nfs server I hope
        os.chdir('/Users/loisks/Desktop/liemohn10/loisks/'+dir[idir])
        emf_file=name[idir]+'_WFR-spectral-matrix_emfisis-L2_'+date1+'*.cdf'
        gemf=glob.glob(emf_file)
        pyf=pycdf.CDF(gemf[0])
        os.chdir('..')
        # get the polarization
        polarization=gM.mag_SVD(pyf, ifreq)
        #
        # get the ephemeris file
        os.chdir('/Users/loisks/Desktop/liemohn10/loisks/EMPHEMERIS_'+sat[idir])
        hope_emphem=glob.glob('rbsp'+lsat[idir]+'_def_MagEphem_OP77Q_'+date1+'*.txt')[0]
        pyf2=dm.readJSONheadedASCII(hope_emphem)
        L_emphem=np.nanmedian(pyf2['L'], axis=1) # L from ephemeris
        L_emphem[L_emphem<0]=np.nan
     
        MLT_emphem=pyf2['CDMAG_MLT'] # MLT from ephemeris
        MLT_emphem[MLT_emphem<0]=np.nan     
        epoch_ephem=epochL4=pd.DatetimeIndex(pyf2['DateTime'])
        #
        # great, now resample this stuff
        LEMP=pd.DataFrame({'L':L_emphem},index=epoch_ephem)
        MLTEMP=pd.DataFrame({'MLT':MLT_emphem},index=epoch_ephem)
          
        # get the coordinates from the EMFISIS L4 files
        # time array is 14400, reduce by factor of 10
        Kp=np.array(KpArr[date1])
Example #13
def get_omni(ticks, dbase='QDhourly', **kwargs):
    '''
    Returns Qin-Denton OMNI values, interpolated to any time-base from a default hourly resolution

    The update function in toolbox retrieves all available hourly Qin-Denton data, 
    and this function accesses that and interpolates to the given times,
    returning the OMNI values as a SpaceData (dict-like) with
    Kp, Dst, dens, velo, Pdyn, ByIMF, BzIMF, G1, G2, G3, etc.
    (see also http://www.dartmouth.edu/~rdenton/magpar/index.html and
    http://www.agu.org/pubs/crossref/2007/2006SW000296.shtml )

    Parameters
    ==========
    ticks : Ticktock class or array-like of datetimes
        time values for desired output

    dbase : str (optional)
        Select data source, options are 'QDhourly', 'OMNI2hourly', 'Mergedhourly'
        Note - Custom data sources can be specified in the spacepy config file
        as described in the module documentation.

    Returns
    =======
    out : spacepy.datamodel.SpaceData
        containing all Qin-Denton values at times given by ticks

    Examples
    ========
    >>> import spacepy.time as spt
    >>> import spacepy.omni as om
    >>> ticks = spt.Ticktock(['2002-02-02T12:00:00', '2002-02-02T12:10:00'], 'ISO')
    >>> d = om.get_omni(ticks)
    >>> d.tree(levels=1)
    +
    |____ByIMF
    |____Bz1
    |____Bz2
    |____Bz3
    |____Bz4
    |____Bz5
    |____Bz6
    |____BzIMF
    |____DOY
    |____Dst
    |____G1
    |____G2
    |____G3
    |____Hr
    |____Kp
    |____Pdyn
    |____Qbits
    |____RDT
    |____UTC
    |____W1
    |____W2
    |____W3
    |____W4
    |____W5
    |____W6
    |____Year
    |____akp3
    |____dens
    |____ticks
    |____velo


    Notes
    =====
    Note about Qbits: If the status variable is 2, the quantity you are using is fairly well
    determined. If it is 1, the value has some connection to measured values, but is not directly
    measured. These values are still better than just using an average value, but not as good
    as those with the status variable equal to 2. If the status variable is 0, the quantity is
    based on average quantities, and the values listed are no better than an average value. The
    lower the status variable, the less confident you should be in the value.

    '''
    dbase_options = {
        'QDhourly': 1,
        'OMNI2hourly': 2,
        'Mergedhourly': 3,
        'Test': -9,
    }

    if not isinstance(ticks, spt.Ticktock):
        try:
            ticks = spt.Ticktock(ticks, 'UTC')
        except:
            raise TypeError(
                'get_omni: Input times must be a Ticktock object or a list of datetime objects'
            )

    if not dbase in dbase_options:
        from spacepy import config
        if dbase in config:
            #If a dbase is specified that isn't a default, then it MUST be in the spacepy config
            qdpath = os.path.split(os.path.split(config[dbase])[0])[0]
            if not os.path.isdir(qdpath):
                raise IOError(
                    'Specified dbase ({0}) does not have a valid location ({1})'
                    .format(dbase, config[dbase]))
            days = list(set([tt.date() for tt in ticks.UTC]))
            flist = [''] * len(days)
            fnpath, fnformat = os.path.split(config[dbase])
            for idx, day in enumerate(days):
                dp = fnpath.replace('YYYY', '{0}'.format(day.year))
                df = fnformat.replace('YYYY', '{0}'.format(day.year))
                df = df.replace('MM', '{0:02d}'.format(day.month))
                df = df.replace('DD', '{0:02d}'.format(day.day))
                flist[idx] = os.path.join(dp, df)
            if 'convert' in kwargs:
                convdict = kwargs['convert']
            else:
                convdict = True  #set to True as default?
            if 'interp' not in kwargs:
                kwargs['interp'] = True
            data = readJSONheadedASCII(sorted(flist), convert=convdict)
            omniout = SpaceData()

            time_var = [
                var for var in ['DateTime', 'Time', 'Epoch', 'UTC']
                if var in data
            ]
            if time_var:
                use_t_var = time_var[0]
            else:
                #no obvious time variable in input files ... can't continue
                raise ValueError('No clear time variable in file')

            if kwargs['interp'] is True:
                data['RDT'] = spt.Ticktock(data[use_t_var]).RDT
                keylist = sorted(data.keys())
                dum = keylist.pop(keylist.index(use_t_var))
                for key in keylist:
                    try:
                        omniout[key] = dmarray(
                            np.interp(ticks.RDT,
                                      data['RDT'],
                                      data[key],
                                      left=np.NaN,
                                      right=np.NaN))
                        omniout[key].attrs = dmcopy(data[key].attrs)
                    except:
                        try:
                            omniout[key] = dmfilled(
                                [len(ticks.RDT), data[key].shape[1]],
                                fillval=np.NaN,
                                attrs=dmcopy(data[key].attrs))
                            for col in range(data[key].shape[1]):
                                omniout[key][:,
                                             col] = np.interp(ticks.RDT,
                                                              data['RDT'],
                                                              data[key][:,
                                                                        col],
                                                              left=np.NaN,
                                                              right=np.NaN)
                        except ValueError:
                            print(
                                'Failed to interpolate {0} to new time base, skipping variable'
                                .format(key))
                        except IndexError:
                            print(
                                'Variable {0} appears to be non-record varying, skipping interpolation'
                                .format(key))
                            omniout[key] = data[key]
                omniout['UTC'] = ticks.UTC
            else:
                #Trim to specified times
                inds = tOverlapHalf([ticks[0].RDT, ticks[-1].RDT],
                                    spt.Ticktock(data['DateTime']).RDT)
                for key in data:
                    if len(inds) == len(data[key]):
                        omniout[key] = data[key][inds]
                    else:  #is ancillary data
                        omniout[key] = data[key]
                #TODO: convert to same format as OMNI/QD read (or vice versa)
                omniout['UTC'] = omniout[use_t_var]
            return omniout
        else:
            raise IOError(
                'Specified dbase ({0}) must be specified in spacepy.config'.
                format(dbase))

    def getattrs(hf, key):
        out = {}
        if hasattr(hf[key], 'attrs'):
            for kk, value in hf[key].attrs.items():
                try:
                    out[kk] = value
                except:
                    pass
        return out

    def HrFromDT(indt):
        hour = indt.hour
        minute = indt.minute
        second = indt.second
        musecond = indt.microsecond
        return hour + (minute / 60.0) + (second / 3600.0) + (musecond /
                                                             3600.0e3)

    import h5py as h5
    fname, QDkeylist, O2keylist = '', [], []
    omnivals = SpaceData()
    dbase_select = dbase_options[dbase]
    if dbase_select in [1, 3, -9]:
        if dbase_select > 0:
            ldb = 'QDhourly'
            fln = omnifln
        else:
            ldb = 'Test'
            fln = testfln
        with h5.File(fln, 'r') as hfile:
            QDkeylist = [kk for kk in hfile if kk not in ['Qbits', 'UTC']]
            st, en = ticks[0].RDT, ticks[-1].RDT
            ##check that requested times are within range of data
            enval, stval = omnirange(dbase=ldb)[1], omnirange(dbase=ldb)[0]
            if (ticks.UTC[0] > enval) or (ticks[-1] < stval):
                raise ValueError('Requested dates are outside data range')
            if (ticks.UTC[-1] > enval) or (ticks[0] < stval):
                print(
                    'Warning: Some requested dates are outside data range ({0})'
                    .format(ldb))
            inds = tOverlapHalf([st, en], hfile['RDT'],
                                presort=True)  #returns an xrange
            inds = indsFromXrange(inds)
            if inds[0] < 1: inds[0] = 1
            sl_op = slice(inds[0] - 1, inds[-1] + 2)

            fname = ','.join([fname, hfile.filename])
            omnivals.attrs = getattrs(hfile, '/')
            for key in QDkeylist:
                omnivals[key] = dmarray(
                    hfile[key][sl_op])  #TODO: add attrs from h5
                omnivals[key].attrs = getattrs(hfile, key)
            for key in hfile['Qbits']:
                omnivals['Qbits<--{0}'.format(key)] = dmarray(
                    hfile['/Qbits/{0}'.format(key)][sl_op])
                omnivals['Qbits<--{0}'.format(key)].attrs = getattrs(
                    hfile, '/Qbits/{0}'.format(key))
                QDkeylist.append('Qbits<--{0}'.format(key))

    if dbase_options[dbase] == 2 or dbase_options[dbase] == 3:
        ldb = 'OMNI2hourly'
        with h5.File(omni2fln) as hfile:
            O2keylist = [kk for kk in hfile if kk not in ['Epoch', 'RDT']]
            st, en = ticks[0].RDT, ticks[-1].RDT
            ##check that requested times are within range of data
            enval, stval = omnirange(dbase=ldb)[1], omnirange(dbase=ldb)[0]
            if (ticks[0].UTC > enval) or (ticks[-1] < stval):
                raise ValueError('Requested dates are outside data range')
            if (ticks[-1].UTC > enval) or (ticks[0] < stval):
                print(
                    'Warning: Some requested dates are outside data range ({0})'
                    .format(ldb))
            inds = tOverlapHalf([st, en], hfile['RDT'],
                                presort=True)  #returns an xrange
            inds = indsFromXrange(inds)
            if inds[0] < 1: inds[0] = 1
            sl_op = slice(inds[0] - 1, inds[-1] + 2)

            fname = ','.join([fname, hfile.filename])
            omnivals.attrs = getattrs(
                hfile, '/'
            )  #TODO: This overwrites the previous set on Merged load... Fix!
            omnivals['RDT_OMNI'] = dmarray(hfile['RDT'][sl_op])
            for key in O2keylist:
                omnivals[key] = dmarray(
                    hfile[key][sl_op])  #TODO: add attrs from h5
                omnivals[key].attrs = getattrs(hfile, key)

    if dbase_options[dbase] == 3:
        #prune "merged" SpaceData
        sigmas = [key for key in omnivals if 'sigma' in key]
        for sk in sigmas:
            del omnivals[sk]
        bees = [key for key in omnivals if re.search('B._', key)]
        for bs in bees:
            del omnivals[bs]
        aves = [key for key in omnivals if ('_ave' in key) or ('ave_' in key)]
        for av in aves:
            del omnivals[av]

    omniout = SpaceData(attrs=dmcopy(omnivals.attrs))
    omniout.attrs['filename'] = fname[1:]
    ###print('QDkeys: {0}\n\nO2keys: {1}'.format(QDkeylist, O2keylist))
    for key in sorted(omnivals.keys()):
        if key in O2keylist:
            omniout[key] = dmarray(
                np.interp(ticks.RDT,
                          omnivals['RDT_OMNI'],
                          omnivals[key],
                          left=np.NaN,
                          right=np.NaN))
            #set metadata -- assume this has been set properly in d/l'd file to match ECT-SOC files
            omniout[key].attrs = dmcopy(omnivals[key].attrs)
        elif key in QDkeylist:
            omniout[key] = dmarray(
                np.interp(ticks.RDT,
                          omnivals['RDT'],
                          omnivals[key],
                          left=np.NaN,
                          right=np.NaN))
            omniout[key].attrs = dmcopy(omnivals[key].attrs)
        if key == 'G3':  #then we have all the Gs
            omniout['G'] = dmarray(
                np.vstack([omniout['G1'], omniout['G2'], omniout['G3']]).T)
            omniout['G'].attrs = dmcopy(omnivals['G1'].attrs)
            for i in range(1, 4):
                del omniout['G{0}'.format(i)]
        if key == 'W6':
            omniout['W'] = dmarray(
                np.vstack([
                    omniout['W1'], omniout['W2'], omniout['W3'], omniout['W4'],
                    omniout['W5'], omniout['W6']
                ]).T)
            omniout['W'].attrs = dmcopy(omnivals['W1'].attrs)
            for i in range(1, 7):
                del omniout['W{0}'.format(i)]
        if 'Qbits' in key:
            #Qbits are integer vals, higher is better, so floor to get best representation of interpolated val
            omniout[key] = np.floor(omnivals[key])
            omniout[key].attrs = dmcopy(omnivals[key].attrs)
            if 'G3' in key:  #then we have all the Gs
                omniout['Qbits<--G'] = dmarray(
                    np.vstack([
                        omniout['Qbits<--G1'], omniout['Qbits<--G2'],
                        omniout['Qbits<--G3']
                    ]).T)
                for i in range(1, 4):
                    del omniout['Qbits<--G{0}'.format(i)]
            if 'W6' in key:
                omniout['Qbits<--W'] = dmarray(
                    np.vstack([
                        omniout['Qbits<--W1'], omniout['Qbits<--W2'],
                        omniout['Qbits<--W3'], omniout['Qbits<--W4'],
                        omniout['Qbits<--W5'], omniout['Qbits<--W6']
                    ]).T)
                for i in range(1, 7):
                    del omniout['Qbits<--W{0}'.format(i)]

    omniout['ticks'] = ticks
    omniout['UTC'] = ticks.UTC
    omniout['Hr'] = dmarray([HrFromDT(val) for val in omniout['UTC']])
    omniout['Year'] = dmarray([val.year for val in omniout['UTC']])
    omniout = unflatten(omniout)

    return omniout
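For the custom-dbase branch above, the spacepy config entry must hold a path template with YYYY, MM, and DD placeholders, which get_omni substitutes per requested day before reading each daily file. A usage sketch follows; the config key, path, and file pattern are invented for illustration.

# In the spacepy config file (e.g. ~/.spacepy/spacepy.rc), a hypothetical entry:
#     mydb: /data/qd/YYYY/QinDenton_YYYYMMDD_1min.txt
import spacepy.omni as om
import spacepy.time as spt

ticks = spt.Ticktock(['2013-02-18T00:00:00', '2013-02-18T01:00:00'], 'ISO')
d = om.get_omni(ticks, dbase='mydb')  # reads the matching daily files and
                                      # interpolates to `ticks` by default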
Example #14
 date=datetime.datetime.strftime(dt1, '%Y%m%d')
 os.chdir('/Users/loisks/Desktop/liemohn10/loisks/EMFISIS_'+dir[idir])
 emf_file='*'+date+'*.cdf'
 gemf=glob.glob(emf_file)
 pyf=pycdf.CDF(gemf[0])
 bTime=pd.DatetimeIndex(pyf['Epoch'][...])
 bMag=pyf['Magnitude'][...]
 dB=pd.DataFrame({'Bmag':bMag}, index=bTime)
 
 #
 # now have to get the ephemeris data
 os.chdir('..')
 os.chdir('EMPHEMERIS_'+dir[idir])
 emp_file='*'+date+'*.txt'
 gemp=glob.glob(emp_file)
 pyfemp=dm.readJSONheadedASCII(gemp[0])                
 L_emphem=pyfemp['L'] # L from ephemeris
 L_emphem=np.nanmedian(L_emphem, axis=1)
 L=np.array(L_emphem)
 Kp_emphem=pyfemp['Kp'] # Kp index from ephemeris file
 MLAT_emphem=pyfemp['CDMAG_MLAT'] # MLAT from ephemeris
 MLAT_emphem[MLAT_emphem<-100]=np.nan
 MLAT=np.array(MLAT_emphem)
 MLT_emphem=pyfemp['CDMAG_MLT'] # MLT from ephemeris
 MLT_emphem[MLT_emphem<-100]=np.nan
 MLT=np.array(MLT_emphem)
 # ephemeris data is every minute
 rt = pd.period_range(date,periods=1441, freq='T').to_timestamp()
 #
 # resample the B data
 B=np.array(dB['Bmag'].resample('1min').mean().reindex(index=rt,fill_value=np.nan))
Example #15
       # get the EFW data
       os.chdir('/Users/loisks/Desktop/liemohn10/loisks/EFW_L3_'+satellites[isat])
       f=glob.glob('*'+iDate+'*')
       pyf=pycdf.CDF(f[0])
       potential=-1*pyf['Vavg'][...]
       rng = pd.period_range(iDate,periods=1440, freq='T').to_timestamp()
       pEpoch=pd.DatetimeIndex(pyf['epoch'][...])
       df=pd.DataFrame(potential, index=pEpoch, columns=['potential'])
        Phi=df['potential'].resample('1min').median().reindex(rng, fill_value=np.nan)
       #
       # now get the ephemeris data
       eclipseFlag=np.zeros(len(rng))
       os.chdir('/Users/loisks/Desktop/liemohn10/loisks/EMPHEMERIS_'+satellites[isat])
       files=glob.glob('*'+iDate+'*')
       pyfem=dm.readJSONheadedASCII(files[0])
       gseCoords=np.swapaxes(pyfem["Rgse"], 1, 0)
       for iGSE in range(len(rng)):
          # satellite is in eclipse when within 1 RE of the GSE x-axis
          # (y^2 + z^2 < 1) on the night side (x < 0)
          if ((gseCoords[1][iGSE])**2 + (gseCoords[2][iGSE])**2) < 1:
              if gseCoords[0][iGSE] < 0:
                  # satellite is in Earth's shadow
                  eclipseFlag[iGSE]=1
                  print('eclipse flag')
       LShell=np.swapaxes(pyfem['L'],1,0)[0] # weird multiple rows with L
       MLT=pyfem['CDMAG_MLT']

      # now let's get dates and times where there is charging and it's
      # not in eclipse
       keyTimes=np.where((np.array(Phi) < 0) & (eclipseFlag != 1))[0]
       chargingYa=np.array(Phi[keyTimes])
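The per-sample eclipse loop above can also be written as one vectorized test. A sketch, assuming (as the comparison with 1 implies) that gseCoords holds GSE positions in Earth radii with shape (3, N):

import numpy as np

# in shadow when within 1 RE of the GSE x-axis (y^2 + z^2 < 1) and x < 0
x, y, z = gseCoords[0], gseCoords[1], gseCoords[2]
eclipseFlag = ((y**2 + z**2 < 1) & (x < 0)).astype(int)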
Example #16
    return metrics


if __name__ == '__main__':
    #load output from month-long run
    infile = sys.argv[1]
    ##infile = 'log_n000000.log'
    data = ram.LogFile(infile)

    useBiot = True
    useDPS = False

    #get Sym-H from Kyoto WDC
    #kyotodata = kyo.fetch('sym', data['time'][0], data['time'][-1]+dt.timedelta(minutes=1))
    #read Kyoto data from saved file
    kyotodata = dm.readJSONheadedASCII('kyotodata_Jan2005.txt')

    #make plot
    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111)
    ax.plot(kyotodata['time'],
            kyotodata['sym-h'],
            color='black',
            label='Sym-H (Kyoto)')
    if useBiot and not useDPS:
        ax.plot(data['time'],
                data['dstBiot'],
                color='crimson',
                label='Sym-H (RAM)')
    elif useDPS and not useBiot:
        ax.plot(data['time'],
Example #17
 def test_toJSONheadedASCII_method_404(self):
     """Convert to toJSONheadedASCII, using the method, catching #404"""
     a = dm.SpaceData({'dat': dm.dmarray([1, 2, 3])})
     a.toJSONheadedASCII(self.testfile, mode='a')
     newobj = dm.readJSONheadedASCII(self.testfile)
     np.testing.assert_array_equal([1, 2, 3], newobj['dat'])
Example #18
File: omni.py  Project: spacepy/spacepy
def get_omni(ticks, dbase='QDhourly', **kwargs):
    '''
    Returns Qin-Denton OMNI values, interpolated to any time-base from a default hourly resolution

    The update function in toolbox retrieves all available hourly Qin-Denton data, 
    and this function accesses that and interpolates to the given times,
    returning the OMNI values as a SpaceData (dict-like) with
    Kp, Dst, dens, velo, Pdyn, ByIMF, BzIMF, G1, G2, G3, etc.
    (see also http://www.dartmouth.edu/~rdenton/magpar/index.html and
    http://www.agu.org/pubs/crossref/2007/2006SW000296.shtml )

    Parameters
    ==========
    ticks : Ticktock class or array-like of datetimes
        time values for desired output

    dbase : str (optional)
        Select data source, options are 'QDhourly', 'OMNI2hourly', 'Mergedhourly'
        Note - Custom data sources can be specified in the spacepy config file
        as described in the module documentation.

    Returns
    =======
    out : spacepy.datamodel.SpaceData
        containing all Qin-Denton values at times given by ticks

    Examples
    ========
    >>> import spacepy.time as spt
    >>> import spacepy.omni as om
    >>> ticks = spt.Ticktock(['2002-02-02T12:00:00', '2002-02-02T12:10:00'], 'ISO')
    >>> d = om.get_omni(ticks)
    >>> d.tree(levels=1)
    +
    |____ByIMF
    |____Bz1
    |____Bz2
    |____Bz3
    |____Bz4
    |____Bz5
    |____Bz6
    |____BzIMF
    |____DOY
    |____Dst
    |____G1
    |____G2
    |____G3
    |____Hr
    |____Kp
    |____Pdyn
    |____Qbits
    |____RDT
    |____UTC
    |____W1
    |____W2
    |____W3
    |____W4
    |____W5
    |____W6
    |____Year
    |____akp3
    |____dens
    |____ticks
    |____velo


    Notes
    =====
    Note about Qbits: If the status variable is 2, the quantity you are using is fairly well
    determined. If it is 1, the value has some connection to measured values, but is not directly
    measured. These values are still better than just using an average value, but not as good
    as those with the status variable equal to 2. If the status variable is 0, the quantity is
    based on average quantities, and the values listed are no better than an average value. The
    lower the status variable, the less confident you should be in the value.

    '''
    dbase_options = {'QDhourly'    : 1,
                     'OMNI2hourly' : 2,
                     'Mergedhourly': 3,
                     'Test'        : -9,
                     }

    if not isinstance(ticks, spt.Ticktock):
        try:
            ticks = spt.Ticktock(ticks, 'UTC')
        except:
            raise TypeError('get_omni: Input times must be a Ticktock object or a list of datetime objects')

    if not dbase in dbase_options:
        from spacepy import config
        if dbase in config:
            #If a dbase is specified that isn't a default, then it MUST be in the spacepy config
            qdpath = os.path.split(os.path.split(config[dbase])[0])[0]
            if not os.path.isdir(qdpath): raise IOError('Specified dbase ({0}) does not have a valid location ({1})'.format(dbase, config[dbase]))
            days = list(set([tt.date() for tt in ticks.UTC]))
            flist = ['']*len(days)
            fnpath, fnformat = os.path.split(config[dbase])
            for idx, day in enumerate(days):
                dp = fnpath.replace('YYYY', '{0}'.format(day.year))
                df = fnformat.replace('YYYY', '{0}'.format(day.year))
                df = df.replace('MM', '{0:02d}'.format(day.month))
                df = df.replace('DD', '{0:02d}'.format(day.day))
                flist[idx] = os.path.join(dp, df)
            if 'convert' in kwargs:
                convdict = kwargs['convert']
            else:
                convdict = True #set to True as default?
            if 'interp' not in kwargs:
                kwargs['interp'] = True
            data = readJSONheadedASCII(sorted(flist), convert=convdict)
            omniout = SpaceData()

            time_var = [var for var in ['DateTime', 'Time', 'Epoch', 'UTC'] if var in data]
            if time_var:
                use_t_var = time_var[0]
            else:
                #no obvious time variable in input files ... can't continue
                raise ValueError('No clear time variable in file')
            
            if kwargs['interp'] is True:    
                data['RDT'] = spt.Ticktock(data[use_t_var]).RDT
                keylist = sorted(data.keys())
                dum = keylist.pop(keylist.index(use_t_var))
                for key in keylist:
                    try:
                        omniout[key] = dmarray(np.interp(ticks.RDT, data['RDT'], data[key], left=np.NaN, right=np.NaN))
                        omniout[key].attrs = dmcopy(data[key].attrs)
                    except:
                        try:
                            omniout[key] = dmfilled([len(ticks.RDT), data[key].shape[1]], fillval=np.NaN, attrs=dmcopy(data[key].attrs))
                            for col in range(data[key].shape[1]):
                                omniout[key][:,col] = np.interp(ticks.RDT, data['RDT'], data[key][:,col], left=np.NaN, right=np.NaN)
                        except ValueError:
                            print('Failed to interpolate {0} to new time base, skipping variable'.format(key))
                        except IndexError:
                            print('Variable {0} appears to be non-record varying, skipping interpolation'.format(key))
                            omniout[key] = data[key]
                omniout['UTC'] = ticks.UTC 
            else:
                #Trim to specified times
                inds = tOverlapHalf([ticks[0].RDT, ticks[-1].RDT], spt.Ticktock(data['DateTime']).RDT)
                for key in data:
                    if len(inds) == len(data[key]):
                        omniout[key] = data[key][inds]
                    else: #is ancillary data
                        omniout[key] = data[key]
                #TODO: convert to same format as OMNI/QD read (or vice versa)
                omniout['UTC'] = omniout[use_t_var]
            return omniout
        else:
            raise IOError('Specified dbase ({0}) must be specified in spacepy.config'.format(dbase))

    def getattrs(hf, key):
        out = {}
        if hasattr(hf[key],'attrs'):
            for kk, value in hf[key].attrs.items():
                try:
                    out[kk] = value
                except:
                    pass
        return out

    def HrFromDT(indt):
        hour = indt.hour
        minute = indt.minute
        second = indt.second
        musecond = indt.microsecond
        return hour+(minute/60.0)+(second/3600.0)+(musecond/3600.0e3)

    import h5py as h5
    fname, QDkeylist, O2keylist = '', [], []
    omnivals = SpaceData()
    dbase_select = dbase_options[dbase]
    if dbase_select in [1, 3, -9]:
        if dbase_select > 0:
            ldb = 'QDhourly'
            fln = omnifln
        else:
            ldb = 'Test'
            fln = testfln
        with h5.File(fln, 'r') as hfile:
            QDkeylist = [kk for kk in hfile if kk not in ['Qbits', 'UTC']]
            st, en = ticks[0].RDT, ticks[-1].RDT
            ##check that requested times are within range of data
            enval, stval = omnirange(dbase=ldb)[1], omnirange(dbase=ldb)[0]
            if (ticks.UTC[0]>enval) or (ticks[-1]<stval):
                raise ValueError('Requested dates are outside data range')
            if (ticks.UTC[-1]>enval) or (ticks[0]<stval):
                print('Warning: Some requested dates are outside data range ({0})'.format(ldb))
            inds = tOverlapHalf([st, en], hfile['RDT'], presort=True) #returns an xrange
            inds = indsFromXrange(inds)
            if inds[0] < 1: inds[0] = 1
            sl_op = slice(inds[0]-1, inds[-1]+2)
    
            fname = ','.join([fname,hfile.filename])
            omnivals.attrs = getattrs(hfile, '/')
            for key in QDkeylist:
                omnivals[key] = dmarray(hfile[key][sl_op]) #TODO: add attrs from h5
                omnivals[key].attrs = getattrs(hfile, key)
            for key in hfile['Qbits']:
                omnivals['Qbits<--{0}'.format(key)] = dmarray(hfile['/Qbits/{0}'.format(key)][sl_op])
                omnivals['Qbits<--{0}'.format(key)].attrs = getattrs(hfile, '/Qbits/{0}'.format(key))
                QDkeylist.append('Qbits<--{0}'.format(key))

    if dbase_options[dbase] == 2 or dbase_options[dbase] == 3:
        ldb = 'OMNI2hourly'
        with h5.File(omni2fln) as hfile:
            O2keylist = [kk for kk in hfile if kk not in ['Epoch','RDT']]
            st, en = ticks[0].RDT, ticks[-1].RDT
            ##check that requested times are within range of data
            enval, stval = omnirange(dbase=ldb)[1], omnirange(dbase=ldb)[0]
            if (ticks[0].UTC>enval) or (ticks[-1]<stval):
                raise ValueError('Requested dates are outside data range')
            if (ticks[-1].UTC>enval) or (ticks[0]<stval):
                print('Warning: Some requested dates are outside data range ({0})'.format(ldb))
            inds = tOverlapHalf([st, en], hfile['RDT'], presort=True) #returns an xrange
            inds = indsFromXrange(inds)
            if inds[0] < 1: inds[0] = 1
            sl_op = slice(inds[0]-1, inds[-1]+2)
        
            fname = ','.join([fname,hfile.filename])
            omnivals.attrs = getattrs(hfile, '/') #TODO: This overwrites the previous set on Merged load... Fix!
            omnivals['RDT_OMNI'] = dmarray(hfile['RDT'][sl_op])
            for key in O2keylist:
                omnivals[key] = dmarray(hfile[key][sl_op]) #TODO: add attrs from h5
                omnivals[key].attrs = getattrs(hfile, key)

    if dbase_options[dbase] == 3:
        #prune "merged" SpaceData
        sigmas = [key for key in omnivals if 'sigma' in key]
        for sk in sigmas: del omnivals[sk]
        bees = [key for key in omnivals if re.search('B._', key)]
        for bs in bees: del omnivals[bs]
        aves = [key for key in omnivals if ('_ave' in key) or ('ave_' in key)]
        for av in aves: del omnivals[av]

    omniout = SpaceData(attrs=dmcopy(omnivals.attrs))
    omniout.attrs['filename'] = fname[1:]
    ###print('QDkeys: {0}\n\nO2keys: {1}'.format(QDkeylist, O2keylist))
    for key in sorted(omnivals.keys()):
        if key in O2keylist:
            omniout[key] = dmarray(np.interp(ticks.RDT, omnivals['RDT_OMNI'], omnivals[key], left=np.NaN, right=np.NaN))
            #set metadata -- assume this has been set properly in d/l'd file to match ECT-SOC files
            omniout[key].attrs = dmcopy(omnivals[key].attrs)
        elif key in QDkeylist:
            omniout[key] = dmarray(np.interp(ticks.RDT, omnivals['RDT'], omnivals[key], left=np.NaN, right=np.NaN))
            omniout[key].attrs = dmcopy(omnivals[key].attrs)
        if key == 'G3': #then we have all the Gs
            omniout['G'] = dmarray(np.vstack([omniout['G1'], omniout['G2'], omniout['G3']]).T)
            omniout['G'].attrs = dmcopy(omnivals['G1'].attrs)
            for i in range(1,4): del omniout['G{0}'.format(i)]
        if key == 'W6':
            omniout['W'] = dmarray(np.vstack([omniout['W1'], omniout['W2'], omniout['W3'], omniout['W4'], omniout['W5'], omniout['W6']]).T)
            omniout['W'].attrs = dmcopy(omnivals['W1'].attrs)
            for i in range(1,7): del omniout['W{0}'.format(i)]
        if 'Qbits' in key:
            #Qbits are integer vals, higher is better, so floor to get best representation of interpolated val
            omniout[key] = np.floor(omnivals[key]) 
            omniout[key].attrs = dmcopy(omnivals[key].attrs)
            if 'G3' in key: #then we have all the Gs
                omniout['Qbits<--G'] = dmarray(np.vstack([omniout['Qbits<--G1'], omniout['Qbits<--G2'], omniout['Qbits<--G3']]).T)
                for i in range(1,4): del omniout['Qbits<--G{0}'.format(i)]
            if 'W6' in key:
                omniout['Qbits<--W'] = dmarray(np.vstack([omniout['Qbits<--W1'], omniout['Qbits<--W2'], omniout['Qbits<--W3'], omniout['Qbits<--W4'], omniout['Qbits<--W5'], omniout['Qbits<--W6']]).T)
                for i in range(1,7): del omniout['Qbits<--W{0}'.format(i)]

    omniout['ticks'] = ticks
    omniout['UTC'] = ticks.UTC
    omniout['Hr'] = dmarray([HrFromDT(val) for val in omniout['UTC']])
    omniout['Year'] = dmarray([val.year for val in omniout['UTC']])
    omniout = unflatten(omniout)

    return omniout
Example #19
def getKondrashovSW(
        fn='../ref_data/1989/Kondrashov_SSA_19890312_19890314.txt'):
    data = dm.readJSONheadedASCII(fn, convert=True)
    return data
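Here convert=True applies readJSONheadedASCII's default converters, which (as Example #2 shows) return time columns such as DateTime as datetime objects rather than strings. A brief usage sketch:

# the datetimes can be used directly for plotting or Ticktock construction
sw = getKondrashovSW()  # default file path from the signature above
print('coverage: {0} to {1}'.format(sw['DateTime'][0], sw['DateTime'][-1]))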
Example #20
               bcvals=coord.Coords([[GEOBx[igp],GEOBy[igp],GEOBz[igp]]], 'GEO', 'car')
               cvals.ticks=Ticktock([ctime], 'UTC')
               bcvals.ticks=Ticktock([ctime], 'UTC')
               new_coord=cvals.convert(desired_coords[ic],'car')
               newB_coord=bcvals.convert(desired_coords[ic], 'car')
               XGSE[igp]=new_coord.x; YGSE[igp]=new_coord.y; ZGSE[igp]=new_coord.z
               Bx[igp]=newB_coord.x; By[igp]=newB_coord.y; Bz[igp]=newB_coord.z

            
    #
    # now we have the interpolated pressures... get the Kp, L, and MLT
    # from HOPE Ephemeris
    #
    os.chdir('/Users/loisks/Desktop/liemohn10/loisks/EMPHEMERIS_'+sats[iSat])
    f2=glob.glob('rbsp'+lsats[iSat]+'_def_MagEphem_OP77Q_'+date1+'*.txt')[0]
    pyf2=dm.readJSONheadedASCII(f2)
    # get params
    LShell=np.array(np.nanmean(np.array(pyf2['L']), axis=1)) # weird multiple rows with L
    MLT=np.array(pyf2['CDMAG_MLT'])
    Kp=np.array(KpArr[date1]) # in minutes 
    #
    # now we sort and compare
    # 0.5 MLT, 0.25 L, and Kp =1 bins up to kp 9   
    for iKp in range(9):
        for iMLT in range(nMLT):
            for iL in range(nL):
                 
                   temp=np.where((MLT >= MLTbins[iMLT]-0.25) & (MLT < MLTbins[iMLT]+0.25))[0]
                   temp2=np.where((LShell >= Lbins[iL]-.125) & (LShell < Lbins[iL]+0.125))[0]
                   temp3=np.where(( Kp >= iKp) & ( Kp < iKp+1))[0]
                   # get the set
Example #21
                    XGSE[igp] = new_coord.x
                    YGSE[igp] = new_coord.y
                    ZGSE[igp] = new_coord.z
                    Bx[igp] = newB_coord.x
                    By[igp] = newB_coord.y
                    Bz[igp] = newB_coord.z

            #
            # now we have the interpolated pressures... get the Kp, L, and MLT
            # from HOPE Ephemeris
            #
            os.chdir('/Users/loisks/Desktop/liemohn10/loisks/EMPHEMERIS_' +
                     sats[iSat])
            f2 = glob.glob('rbsp' + lsats[iSat] + '_def_MagEphem_OP77Q_' +
                           date1 + '*.txt')[0]
            pyf2 = dm.readJSONheadedASCII(f2)
            # get params
            LShell = np.array(np.nanmean(np.array(pyf2['L']),
                                         axis=1))  # weird multiple rows with L
            MLT = np.array(pyf2['CDMAG_MLT'])
            Kp = np.array(KpArr[date1])  # in minutes
            #
            # now we sort and compare
            # 0.5 MLT, 0.25 L, and Kp =1 bins up to kp 9
            for iKp in range(9):
                for iMLT in range(nMLT):
                    for iL in range(nL):

                        temp = np.where((MLT >= MLTbins[iMLT] - 0.25)
                                        & (MLT < MLTbins[iMLT] + 0.25))[0]
                        temp2 = np.where((LShell >= Lbins[iL] - .125)
Example #22
endDt=datetime.datetime.strptime(dateEnd,'%Y%m%d')
DT=datetime.datetime.strptime(date, '%Y%m%d')
sat=['A', 'B']
aeList=[]
alList=[]
aeTimeList=[]
potentialList=[]
dateArr=[]
threshold=-10
#
# get all the AE data
# first get 2013
os.chdir('/Users/loisks/Documents/ResearchProjects/ChargingProject/AEindex/AE_121314/2013')
f=glob.glob('*')
for iFile in range(len(f)):
    data=dm.readJSONheadedASCII(f[iFile])
    aeList+=list(data['AE'])
    aeTimeList+=list(data['DateTime'])
    alList+=list(data['AL'])
os.chdir('/Users/loisks/Documents/ResearchProjects/ChargingProject/AEindex/AE_121314/2014')
f=glob.glob('*')
for iFile in range(len(f)):
    data=dm.readJSONheadedASCII(f[iFile])
    aeList+=list(data['AE'])
    aeTimeList+=list(data['DateTime'])
    alList+=list(data['AL'])
os.chdir('/Users/loisks/Documents/ResearchProjects/ChargingProject/')
#
AETimes=np.zeros(len(aeTimeList))
# now have complete lists of times and indexes
for iTime in range(len(aeTimeList)):