Code Example #1
File: chapman.py  Project: irbdavid/celsius
import os
import numpy as np
import celsius

def get_f10_7(times, fname=None, return_all=False):
    if not fname:
        fname = os.path.expandvars('$SC_DATA_DIR/omni/omni2_daily_19560.lst')

    data = np.loadtxt(fname).T
    years = dict([(k, celsius.spiceet('%d-001T00:00' % k)) for k in np.unique(data[0])])
    time = np.array([years[d] + (v-1.) * 86400. for d, v in zip(data[0], data[1])])
    if return_all:
        return time, 1.0 * data[3]
    return np.interp(times, time, data[3], left=np.nan, right=np.nan)
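A minimal usage sketch follows; it assumes the celsius package is importable and that $SC_DATA_DIR points at a directory containing the OMNI daily file. The dates are illustrative only:

t0 = celsius.spiceet('2015-01-01T00:00')
times = t0 + np.arange(10) * 86400.      # ten daily samples
f107 = get_f10_7(times)                  # NaN outside the coverage of the OMNI file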
Code Example #2
File: maven_sc.py  Project: irbdavid/maven
import numpy as np
import celsius

def read_maven_orbits(fname):
    """docstring for read_maven_orbits"""

    print('Reading %s ... ' % fname, end=' ')

    orbit_list = celsius.OrbitDict()

    f = open(fname, 'r')

    # skip headers
    f.readline()
    f.readline()

    # The Lockheed-produced orbit file lists each orbit's periapsis together with the
    # terminating apoapsis, i.e. the wrong way round, so carry the previous apoapsis forward.
    last_apoapsis = np.nan

    for line in f.readlines():
        if "Unable to determine" in line:
            print("Detected 'Unable to determine' (last orbit bounds error?)")
            continue

        try:
            number = int(line[0:5])
            apoapsis = celsius.spiceet(line[51:71])
            periapsis = celsius.spiceet(line[7:27])

            if not np.isfinite(last_apoapsis):
                this_apo = periapsis - 0.00000001 # A hack.  Don't want a NaN in there.
                # First orbit will start at periapsis, effectively
            else:
                this_apo = last_apoapsis
            last_apoapsis = apoapsis
            m = celsius.Orbit(number=number, start=this_apo,
                    periapsis=periapsis, apoapsis=apoapsis, name='MAVEN')

            orbit_list[number] = m
        except ValueError as e:
            print(e)
            raise

    print(' read %d orbits (MAX = %d)' % (len(orbit_list), number))
    return orbit_list
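A short usage sketch, with an illustrative file name; it assumes celsius.Orbit exposes its constructor arguments (number, periapsis, apoapsis) as attributes:

orbits = read_maven_orbits('maven_orb_rec.orb')   # hypothetical orbit-number file
first = min(orbits.keys())
print(orbits[first].periapsis, orbits[first].apoapsis)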
Code Example #3
File: maven_sc.py  Project: irbdavid/maven
import celsius
import numpy as np
import os
from glob import glob, iglob

# import spiceypy
import spiceypy

KERNELS_LOADED = False

# NOMINAL_INSERTION_DATE = spiceet("2014 10 27")
INSERTION_DATE = celsius.spiceet('2014-09-22T02:24')
LAUNCH_DATE = celsius.spiceet('2013-11-17T18:28')

# nb: Public SDC access:
# http://lasp.colorado.edu/maven/sdc/public/data/sci


REQUIRED_KERNELS = [
        'lsk/naif*.tls',
        'fk/maven_*.tf',
        'pck/pck*.tpc',
        'sclk/MVN_SCLKSCET.*.tsc',
        'spk/de421.bsp',
        'spk/de430s.bsp',
        'spk/mar097s.bsp',
        'spk/maven_orb.bsp',
        'spk/maven_orb_rec.bsp',
        # 'RSSD0002.TF',
                ]
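These wildcard patterns are resolved against the kernel directory by load_kernels, shown later in this listing (also from maven_sc.py). A quick sketch of checking what ended up furnished, assuming spiceypy is installed and the kernel tree is in place:

load_kernels(time=INSERTION_DATE, verbose=True)       # defined later in maven_sc.py
print(spiceypy.ktotal('ALL'), 'kernels currently loaded')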
Code Example #4
File: ngims.py  Project: irbdavid/maven
def load_ngims_l2(start, finish, kind='ion', species='all',
        http_manager=None, delete_others=True, cleanup=False, verbose=None):
    kind = kind.lower()

    if not delete_others:
        raise RuntimeError("Not written yet")

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    if species == 'all':
        species = list(ION_MASSES.keys())

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'ngi/l2/%04d/%02d/mvn_ngi_l2_%s-abund-*_v05_r*.csv' % \
                                            (year, month, kind),
                    start=start, finish=finish,
                    # earlier versioning scheme: lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    version_function=lambda x: (x[0], 5. + float(x[1])/100.),
                    date_function=lambda x:
                        sdc_interface.yyyymmdd_to_spiceet(x[0].split('_')[1]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('NGIMS L2 cleanup complete')

    if not files:
        raise IOError("No NGIMS data found")

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)

    if kind == 'ion':
        output = {'time':None, 'total':None}
        for s in species:
            output[s] = None

        for f in sorted(files):
            if verbose:
                print(f)
            d = np.loadtxt(f, skiprows=1, delimiter=',', usecols=(0,12,14),
                    converters={0: lambda x: celsius.spiceet(x),
                                7: lambda x: float(x or 'NaN'),
                                9: lambda x: float(x or 'NaN')}).T
            count = None

            for s in species:
                mass = ION_MASSES[s]
                inx, = np.where(d[1] == mass)
                if count is None:
                    count = inx.size
                else:
                    if count != inx.size:
                        raise ValueError("Malformed file?")

                if output[s] is None:
                    output[s] = d[2,inx]
                else:
                    output[s] = np.hstack((output[s], d[2,inx]))

            if output['time'] is None:
                output['time'] = d[0, inx]
            else:
                output['time'] = np.hstack((output['time'], d[0, inx]))

    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    output['time'] = output['time']# + celsius.spiceet("1970-01-01T00:00")

    return output
Code Example #5
File: ngims.py  Project: irbdavid/maven
def cleanup(start=None, finish=None):
    if not start: start = celsius.spiceet("2014-09-22T00:00")
    if not finish: finish = celsius.now()

    # Cleanup commands
    load_ngims_l2_summary(start, finish, cleanup=True, verbose=True)
Code Example #6
File: ngims.py  Project: irbdavid/maven
        raise ValueError("Input kind='%s' not recognized" % kind)

    output['time'] = output['time']# + celsius.spiceet("1970-01-01T00:00")

    return output

def cleanup(start=None, finish=None):
    if not start: start = celsius.spiceet("2014-09-22T00:00")
    if not finish: finish = celsius.now()

    # Cleanup commands
    load_ngims_l2_summary(start, finish, cleanup=True, verbose=True)

if __name__ == '__main__':
    plt.close('all')
    t0 = celsius.spiceet("2015-04-30T00:00")
    # t1 = t0 + 86400. * 2. + 1
    t1 = t0 + 86400. - 1.

    # d = load_ngims_l2_summary(t0, t1, kind='onboardsvyspec')
    # plot_ngims_l2_summary(d)

    d = load_ngims_l2(t0, t1, kind='ion')

    for s in list(ION_MASSES.keys()):
        plt.plot(d['time'], d[s])

    plt.yscale('log')

    celsius.setup_time_axis()
Code Example #7
File: mex_sc.py  Project: irbdavid/mex
def load_kernels(time=None, force=False, verbose=False,
                load_all=False, keep_previous=False):
    """Load spice kernels, with a stateful thing to prevent multiple calls"""
    # global last_spice_time_window
    last_spice_time_window = getattr(spiceypy, 'last_spice_time_window', 'MEX:NONE')

    if load_all:
        # Launch to now + 10 yrs
        start = celsius.spiceet("2003-06-01T00:00")
        finish = celsius.spiceet(mex.now() + 10.*86400.*365.)

    if time is None:
        start = None
        finish = None
        start_str = 'NO_TIME_SET'
        finish_str = ''
        start_int=-999999
        finish_int=-999999
    else:
        if hasattr(time, '__len__'):
            start = time[0]
            finish = time[-1]

        else:
            start = time
            finish = time
        start_str = celsius.utcstr(start, 'ISOC')
        finish_str = celsius.utcstr(finish, 'ISOC')
        start_int = int(start_str[2:4] + start_str[5:7] + '01')
        finish_int = int(finish_str[2:4] + finish_str[5:7] + '01')
        start_str = '%06d' % start_int
        finish_str = '%06d' % finish_int

    this_spice_time_window = start_str + finish_str

    if 'NONE' not in last_spice_time_window:
        if last_spice_time_window == this_spice_time_window:
            if verbose:
                print('LOAD_KERNELS: Interval unchanged')
            return

        if keep_previous:
            if verbose:
                print('LOAD_KERNELS: Keeping loaded kernels')
            return

    spiceypy.last_spice_time_window = 'MEX:'+this_spice_time_window

    spiceypy.kclear()

    try:
        kernel_directory = mex.data_directory + 'spice'
        if verbose:
            print('LOAD_KERNELS: Registering kernels:')

        for k in REQUIRED_KERNELS:

            if '*' in k:
                files = glob.glob(kernel_directory + k)
                m = -1
                file_to_load = ''
                for f in files:
                    t = os.path.getmtime(f)
                    if t > m:
                        m = t
                        file_to_load = f
                if verbose:
                    print(file_to_load)
                if not file_to_load:
                    raise IOError("No matches found for %s" % k)
                spiceypy.furnsh(file_to_load)

            else:
                spiceypy.furnsh(kernel_directory + k)
                if verbose: print(kernel_directory + k)

        if start_int > -999999:
            # Load time-sensitive kernels
            for f in glob.iglob(kernel_directory + '/spk/ORMM_T19_*.BSP'):
                this_int = int(f.split('_T19_')[1][:6])
                if this_int < start_int: continue
                if this_int > finish_int: continue
                spiceypy.furnsh(f)
                if verbose: print(f)

    except Exception as e:
        spiceypy.kclear()
        spiceypy.last_spice_time_window = 'MEX:NONE_ERROR'
        raise
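A usage sketch, assuming the mex package and its SPICE tree are configured; the interval is illustrative:

t0 = celsius.spiceet('2010-01-01T00:00')
load_kernels(time=[t0, t0 + 30 * 86400.], verbose=True)   # furnish kernels covering January 2010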
Code Example #8
File: static.py  Project: irbdavid/maven
def load_static_l2(start, finish, kind='c0',
        http_manager=None, delete_others=True, cleanup=False, verbose=None):
    kind = kind.lower()

    full_kind = STATIC_PRODUCTS[kind]

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'sta/l2/%04d/%02d/mvn_sta_l2_%s_*_v*_r*.cdf' % \
                                            (year, month, full_kind),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('static L2 Cleanup complete')
        return

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)
    #
    # if kind == 'c6':
    #     output = {'time':None, 'eflux':None, 'static_kind':'c6'}
    #     for f in sorted(files):
    #         c = pycdf.CDF(f)
    #
    #         if output['time'] is None:
    #             output['time'] = np.array(c['time_unix'])
    #             output['eflux']  = np.array(c['eflux']).T
    #
    #             output['energy']  = np.array(c['energy'][0,:,0])
    #             output['mass']  = np.array(c['mass_arr'][:,0,0])
    #
    #         else:
    #             output['time'] = np.hstack((output['time'],
    #                                 np.array(c['time_unix'])))
    #             output['eflux']  = np.hstack((output['time'],
    #                                 np.array(c['eflux'].T)))
    #
    #             # if output['energy'].shape != c['energy'].shape[1]:
    #             #     raise ValueError("Energy range has changed!")
    #             #
    #             # if output['mass'].shape != c['mass_arr'].shape[0]:
    #             #     raise ValueError("Mass range has changed!")
    #
    #         c.close()

    if kind == 'c0':
        t0 = celsius.spiceet("1970-01-01T00:00")
        output = {'blocks':[], 'static_kind':'c0'}
        for f in sorted(files):
            c = pycdf.CDF(f)

            data = np.array(c['eflux'])
            last_ind = None
            last_block_start = None
            N = data.shape[0]

            for i in range(data.shape[0]):

                if last_ind is None:
                    last_ind = c['swp_ind'][i]
                    last_block_start = i

                # Flush the accumulated sweep block when swp_ind changes;
                # range(N) never reaches i == N, so test the final sample instead.
                if (c['swp_ind'][i] != last_ind) or (i == N - 1):

                    img = data[last_block_start:i-1, :, :].sum(axis=1)
                    extent = (
                            c['time_unix'][last_block_start] + t0,
                            c['time_unix'][i-1] + t0,
                            c['energy'][0, -1, last_ind],
                            c['energy'][0, 0, last_ind],
                        )
                    # print(last_ind, extent)
                    output['blocks'].append((extent, (img.T[::-1,:])))
                    # plt.imshow(np.log10(img.T[::-1,:]), extent=extent,
                    #             origin='lower', interpolation='nearest')
                    last_ind = None

            c.close()


    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    # output['time'] = output['time'] + celsius.spiceet("1970-01-01T00:00")

    return output
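The 'c0' output is a list of (extent, image) blocks rather than a single array. A plotting sketch mirroring the commented-out imshow call above, assuming numpy, matplotlib and celsius are importable and that t0, t1 are SPICEET bounds defined elsewhere:

import matplotlib.pyplot as plt

d = load_static_l2(t0, t1, kind='c0')
for extent, img in d['blocks']:
    plt.imshow(np.log10(img), extent=extent, origin='lower',
               interpolation='nearest', aspect='auto')
plt.yscale('log')
celsius.setup_time_axis()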
Code Example #9
File: sdc_interface.py  Project: irbdavid/maven
def yyyymmdd_to_spiceet(x):
    return celsius.spiceet(x[:4] + '-' + x[4:6] + '-' + x[6:8] + 'T00:00')
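For example, a date string lifted from a file name converts as follows:

et = yyyymmdd_to_spiceet('20150108')    # SPICEET for 2015-01-08T00:00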
Code Example #10
File: swia.py  Project: irbdavid/maven
def load_swia_l2_summary(start, finish, kind='onboardsvymom',
        http_manager=None, delete_others=True, cleanup=False, verbose=None):
    kind = kind.lower()

    if not delete_others:
        raise RuntimeError("Not written yet")

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'swi/l2/%04d/%02d/mvn_swi_l2_%s_*_v*_r*.cdf' % \
                                            (year, month, kind),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('SWIA L2 Cleanup complete')
        return

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)


    if kind == 'onboardsvyspec':
        output = {'time':None, 'def':None}
        for f in sorted(files):
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['def']  = np.array(c['spectra_diff_en_fluxes']).T

                # Some weird formatting here:
                output['energy']  = np.array(
                    [c['energy_spectra'][i] for i in range(c['energy_spectra'].shape[0])]
                )
                output['energy'] = output['energy'][::-1]
            else:
                output['time'] = np.hstack((output['time'],
                                    np.array(c['time_unix'])))
                output['def'] = np.hstack((output['def'],
                                    np.array(c['spectra_diff_en_fluxes']).T))

                if output['energy'].shape != c['energy_spectra'].shape:
                    raise ValueError("Energy range has changed!")

            c.close()
        output['def'] = output['def'][::-1,:]

    elif kind == 'onboardsvymom':
        output = {'time':None, 'velocity':None, 'density':None,
                                'temperature':None, 'quality_flag':None}
        for f in sorted(files):
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['quality_flag'] = np.array(c['quality_flag'])
                output['density'] = np.array(c['density'])
                output['velocity'] = np.array(c['velocity']).T
                output['temperature'] = np.array(c['temperature']).T
            else:
                merge_attrs(output, 'time', c, 'time_unix')
                merge_attrs(output, 'quality_flag', c)
                merge_attrs(output, 'density', c)

                merge_attrs(output, 'velocity', c, transpose=True)
                merge_attrs(output, 'temperature', c, transpose=True)

            c.close()

    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    output['time'] = output['time'] + celsius.spiceet("1970-01-01T00:00")

    return output
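A usage sketch for the moments product, assuming matplotlib and celsius are available; the day chosen is illustrative:

import matplotlib.pyplot as plt

t0 = celsius.spiceet('2015-01-08T00:00')
d = load_swia_l2_summary(t0, t0 + 86400., kind='onboardsvymom')
plt.plot(d['time'], d['density'], 'k.')
celsius.setup_time_axis()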
Code Example #11
File: kp.py  Project: irbdavid/maven
def load_kp_data(start, finish, vars='ALL', truncate_time=True,
        http_manager=None, cleanup=False, verbose=None):
    """Reads MAVEN kp data into a structure.  Downloads / syncs if neccessary.
    Args:
        start, finish: SPICEET times
        vars: variable names to store (not implemented - default ALL)
        http_manager: connection to use
        cleanup: if True, no data will be downloaded or returned, and instead
            only superceded local files will be deleted
        verbose: locally overrides http_manager.verbose
        truncate_time: slice out only those points between start and finish,
            or return whole days if false

    Returns:
        results of kp_read_files for the located files
    """

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'kp/insitu/%04d/%02d/mvn_kp_insitu_*_v*_r*.tab' % \
                                            (year, month),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('KP cleanup complete')
        return

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)

    if not files:
        raise IOError("No KP data found")

    data = kp_read_files(files)

    if truncate_time:
        inx, = np.where((data.time > start) & (data.time < finish))
        for k in list(data.keys()):
            if k not in ('time', 'descriptions'):
                for kk in list(data[k].keys()):
                    data[k][kk] = data[k][kk][inx]

    data['time'] = data['spacecraft']['time'] # a link

    return data
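A usage sketch; with truncate_time=True (the default) the returned arrays are clipped to the requested window. It assumes kp_read_files returns numpy arrays per field, as the truncation code above implies:

t0 = celsius.spiceet('2015-01-08T00:00')
data = load_kp_data(t0, t0 + 86400.)
print(data['time'][0], data['time'][-1])   # both fall within [t0, t0 + 86400.]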
Code Example #12
File: lpw.py  Project: irbdavid/maven
def lpw_l2_load(start, finish, kind='lpnt', http_manager=None, cleanup=False,
                    verbose=None):
    """Finds and loads LPW L2 data"""

    if http_manager is None: http_manager = sdc_interface.maven_http_manager
    kind = kind.lower()

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        # print year, month
        files.extend(
                http_manager.query(
                    'lpw/l2/%04d/%02d/mvn_lpw_l2_%s_*_v*_r*.cdf' % \
                                            (year, month, kind),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x:
                                sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('LPW L2 cleanup complete')
        return

    if not files:
        raise IOError("No data found")

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)


    if kind == 'lpnt':
        output = dict(time=None, ne=None, te=None, usc=None)
        for f in sorted(files):
            c = pycdf.CDF(f)
            if output['time'] is None:
                # inx =
                output['time'] = np.array(c['time_unix'])
                output['ne'] = np.array(c['data'][:,0])
                output['te'] = np.array(c['data'][:,1])
                output['usc'] = np.array(c['data'][:,2])
            else:
                output['time'] = np.hstack((output['time'],
                    np.array(c['time_unix'])))

                for v, i in zip(('ne', 'te', 'usc'), (0,1,2)):
                    output[v] = np.hstack((output[v], np.array(c['data'][:,i])))
            c.close()

    elif kind == 'wn':
        output = dict(time=None, ne=None)
        for f in sorted(files):
            print(f)
            c = pycdf.CDF(f)
            if output['time'] is None:
                # inx =
                output['time'] = np.array(c['time_unix'])
                output['ne'] = np.array(c['data'])
            else:
                output['time'] = np.hstack((output['time'],
                    np.array(c['time_unix'])))
                output['ne'] = np.hstack((output['ne'],
                    np.array(c['data'])))

                # for v, i in zip(('ne', 'te', 'usc'), (0,1,2)):
                #     output[v] = np.hstack((output[v], np.array(c['data'][:,i])))
            c.close()


    elif kind == 'wspecact':
        output = dict(time=None, spec=None, freq=None)
        for f in sorted(files):
            print(f)
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['spec'] = np.array(c['data']).T
                output['freq'] = np.array(c['freq'][0,:])
            else:
                output['time'] = np.hstack((output['time'],
                                np.array(c['time_unix'])))
                output['spec'] = np.hstack((output['spec'],
                                np.array(c['data']).T))
            c.close()

        # print 'Warning: spectra output is not interpolated!'

    elif kind == 'wspecpas':
        output = dict(time=None, spec=None, freq=None)
        for f in sorted(files):
            print(f)
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['spec'] = np.array(c['data']).T
                output['freq'] = np.array(c['freq'][0,:])
            else:
                output['time'] = np.hstack((output['time'],
                                np.array(c['time_unix'])))
                output['spec'] = np.hstack((output['spec'],
                                np.array(c['data']).T))
        # print 'Warning: spectra output is not interpolated!'
            c.close()

    elif kind == 'lpiv':
        output = dict(time=None, current=None, volt=None)
        for f in sorted(files):
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['current'] = [np.array(c['data']).T]
                output['volt'] = [np.array(c['volt']).T]
            else:
                output['time'] = np.hstack((output['time'],
                                np.array(c['time_unix'])))
                output['current'].append(np.array(c['data']).T)
                output['volt'].append(np.array(c['volt']).T)

            c.close()

    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    output['time'] = output['time'] + celsius.spiceet("1970-01-01T00:00")
    return output
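A usage sketch for the 'lpnt' product, assuming matplotlib and celsius are available; the day is illustrative:

import matplotlib.pyplot as plt

t0 = celsius.spiceet('2015-01-08T00:00')
d = lpw_l2_load(t0, t0 + 86400., kind='lpnt')
plt.plot(d['time'], d['ne'], 'k.')
plt.yscale('log')
celsius.setup_time_axis()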
Code Example #13
File: lpw.py  Project: irbdavid/maven
    return img, img_obj, cbar

def cleanup(start=None, finish=None):
    if not start: start = celsius.spiceet("2014-09-22T00:00")
    if not finish: finish = celsius.now()

    # Cleanup commands
    lpw_l2_load(start, finish, cleanup=True, verbose=True)

if __name__ == '__main__':

    if False:
        import maven
        import mex

        t0 = celsius.spiceet("2015-01-07T00:00")
        c = get_hf_act_densities(t0, t0 + 86400.*2., verbose=True)
        print(list(c.keys()))
        inx = c['confidence'] > 95.

        # plt.close('all')
        fig, axs = plt.subplots(3,1, sharex=True)

        plt.sca(axs[0])
        plt.plot(c['time'][inx], c['density'][inx], 'r.')

        # plt.plot(mexdata['time'], mexdata['ne'], 'b.')
        plt.ylabel("ne / cm^-3")

        plt.yscale('log')
Code Example #14
File: field_models.py  Project: irbdavid/celsius
        print(mex.mars_mean_radius_km)
        # r = np.zeros(100) + 1.001 * mex.mars_mean_radius_km
        # lon = np.linspace(0., 360., r.shape[0])
        # lat = np.zeros_like(r) - 60.
        # field = a(np.vstack((r, 90.0 - lat, lon)))
        fig = plt.figure(figsize=(8,6))

        orb = mex.orbits[10470]
        et = np.linspace(orb.periapsis-3600., orb.periapsis+3600., 1000)



        # Duru 06a
        if comparison == 'Duru':
            et = np.linspace(
                celsius.spiceet("2005-224T04:45:00"), celsius.spiceet("2005-224T05:15:00"), 100)

        # Fraenz 10a
        if comparison == 'Fraenz':
            orb = mex.orbits[5009]
            et = np.linspace(-7 * 60., 7 * 60., 100) + orb.periapsis

        pos = mex.iau_r_lat_lon_position(et)
        # pos[0,:] += mex.mars_mean_radius_km
        # pos = np.empty_like(p)
        # pos[2,:] = -np.rad2deg(p[1,:])
        # pos[1,:] = np.rad2deg(p[2,:])
        # pos[0,:] = p[0,:]

        if comparison == 'Duru':
            pos[0,:] = np.zeros_like(pos[0,:]) + mex.mars_mean_radius_km + 150.
Code Example #15
File: static.py  Project: irbdavid/maven
    if colorbar:
        plt.colorbar(cax=celsius.make_colorbar_cax()).set_label('static D.E.F.')

    return imgs

def cleanup(start=None, finish=None):
    if not start: start = celsius.spiceet("2014-09-22T00:00")
    if not finish: finish = celsius.now()

    # Cleanup commands
    load_static_l2_summary(start, finish, cleanup=True)

if __name__ == '__main__':
    plt.close('all')
    t0 = celsius.spiceet("2015-01-08")
    t1 = t0 + 86400. * 2. + 1
    t1 = t0 + 86400. - 1.

    # d = load_static_l2_summary(t0, t1, kind='onboardsvyspec')
    # plot_static_l2_summary(d)

    d = load_static_l2_summary(t0, t1, kind='c6')
    plt.subplot(211)
    plt.plot(d['time'], d['density'])
    plt.subplot(212)
    plt.plot(d['time'], d['velocity'][0], 'r.')
    plt.plot(d['time'], d['velocity'][1], 'g.')
    plt.plot(d['time'], d['velocity'][2], 'b.')

    celsius.setup_time_axis()
Code Example #16
File: maven_sc.py  Project: irbdavid/maven
def load_kernels(time=None, force=False, verbose=False,
                load_all=False, keep_previous=False):
    """Load spice kernels, with a stateful thing to prevent multiple calls"""
    last_spice_time_window = getattr(spiceypy,
            'last_spice_time_window', 'MVN:NONE')

    if load_all:
        # Launch to now + 10 yrs
        start = celsius.spiceet("2013-11-19T00:00")
        finish = celsius.spiceet(celsius.now() + 10.*86400.*365.)

    if time is None:
        start = None
        finish = None
        start_str = 'NO_TIME_SET'
        finish_str = ''
        start_int=-999999
        finish_int=-999999
    else:
        if hasattr(time, '__len__'):
            start = time[0]
            finish = time[-1]

        else:
            start = time
            finish = time
        start_str = celsius.utcstr(start, 'ISOC')
        finish_str = celsius.utcstr(finish, 'ISOC')
        start_int = int(start_str[2:4] + start_str[5:7] + '01')
        finish_int = int(finish_str[2:4] + finish_str[5:7] + '01')
        start_str = '%06d' % start_int
        finish_str = '%06d' % finish_int

    this_spice_time_window = start_str + finish_str

    if 'NONE' not in last_spice_time_window:
        if last_spice_time_window == this_spice_time_window:
            if verbose:
                print('LOAD_KERNELS [MVN]: Interval unchanged')
            return

        if keep_previous:
            if verbose:
                print('LOAD_KERNELS [MVN]: Keeping loaded kernels')
            return

    spiceypy.last_spice_time_window = 'MVN:' + this_spice_time_window

    spiceypy.kclear()

    try:
        kernel_directory = os.getenv('MAVEN_KERNEL_DIR')
        if verbose:
            print('LOAD_KERNELS [MVN]: Registering kernels:')

        for k in REQUIRED_KERNELS:

            if '*' in k:
                files = glob(kernel_directory + k)
                m = -1
                file_to_load = ''
                for f in files:
                    t = os.path.getmtime(f)
                    if t > m:
                        m = t
                        file_to_load = f
                if verbose:
                    print(file_to_load)
                if file_to_load:
                    spiceypy.furnsh(file_to_load)
                else:
                    raise IOError("No match for %s" % k)

            else:
                spiceypy.furnsh(kernel_directory + k)
                if verbose: print(kernel_directory + k)

        # time sensitive kernels
        load_count = 0

        # used to determine whether or not to load the most recent, unnumbered
        # rolling update kernel
        max_encountered = -99999

        if start_int > -999999:
            # Load time-sensitive kernels
            for f in iglob(kernel_directory + 'spk/maven_orb_rec_*.bsp'):
                this_start = int(f.split('_')[3])
                this_finish = int(f.split('_')[4])
                if this_finish < start_int: continue
                if this_start > finish_int: continue
                spiceypy.furnsh(f)
                load_count += 1
                if verbose: print(f)

                if this_start > max_encountered: max_encountered = this_start
                if this_finish > max_encountered: max_encountered = this_finish

            if max_encountered < finish_int:
                # load the rolling-update kernel too
                f = kernel_directory + 'spk/maven_orb_rec.bsp'
                spiceypy.furnsh(f)
                load_count += 1
                if verbose: print(f)

            if load_count == 0:
                raise IOError("No kernels matched for time period")

    except Exception as e:
        spiceypy.kclear()
        spiceypy.last_spice_time_window = 'MVN:NONE_ERROR'
        raise

    print('LOAD_KERNELS [MVN]: Loaded %s' % spiceypy.last_spice_time_window)
Code Example #17
File: sdc_interface.py  Project: irbdavid/maven
    def query(self, query, version_function=None, date_function=None,
                start=None, finish=None, cleanup=False, verbose=None,
                silent=None):
        """Takes a query, returns a list of local files that match.

Will first query the remote server, download missing files, and then delete local files that match the query but are no longer present on the remote. The implicit assumption here is that the remote directory is PERFECTLY maintained.

Args:
    query: query string with wildcards to locate a file on the remote server,
        e.g. 'sci/lpw/l2/2015/01/mvn_lpw_l2_lpnt_*_v*_r*.cdf'

    version_function: takes the expanded wildcards from the query, and converts
        them to a number used to compare versions and releases (higher=better).
        For example:
            lambda x: (x[0], float(x[1]) + float(x[2])/100.)
        to generate 1.02 for V1, R2 for the above query (2nd and 3rd wildcards)

    date_function: takes the expanded wildcards from the query, and converts to
        a date for the content of the file, for example:
            lambda x: yyyymmdd_to_spiceet(x[0])
        for the above query example.

    start: start date SPICEET, ignored if not set
    finish: finish date, ignored if not set. 'finish' must be set if 'start' is
        (can use np.inf, if you want)

Returns: List of local files, freshly downloaded if necessary, that satisfy the
    query supplied.
        """

        file_list = []
        split_query = query.split('/')
        query_base_path = '/'.join(split_query[:-1]) + '/'
        query_filename  = split_query[-1]

        if verbose is None: verbose = self.verbose
        if silent is None: silent = self.silent

        if version_function is None:
            version_function = lambda x: (0, ''.join(x))

        self.current_re = re.compile(query_filename.replace("*", r"(\w*)"))

        if not os.path.exists(self.local_path + query_base_path):
            os.makedirs(self.local_path + query_base_path)

        check_time = False
        if start or finish: check_time = True

        if check_time and (date_function is None):
            raise ValueError("Start and finish are set, but date_function is not")

        if check_time:
            start_day = celsius.spiceet(celsius.utcstr(start, 'ISOC')[:10])
            finish_day = celsius.spiceet(celsius.utcstr(finish, 'ISOC')[:10]) \
                                + 86398. #1 day - 1s - 1 (possible) leap second

        # if verbose:
        #   print 'Remote path: ', self.remote_path + query_base_path

        ok_files = {}  # key will be the unique id of the file, value will be (version, the full name, local == True)
        files_to_delete = []

        n_downloaded = 0
        n_deleted    = 0

        # Find local matches
        for f in os.listdir(self.local_path + query_base_path):
            tmp = self.current_re.match(f)
            if tmp:
                unique_id, version_number = version_function(tmp.groups())

                if check_time:
                    file_time = date_function(tmp.groups())
                    if (file_time < start_day) or (file_time > finish_day):
                        continue

                if unique_id in ok_files:
                    if ok_files[unique_id][0] < version_number:
                        ok_files[unique_id] = (version_number, self.local_path + query_base_path + f, True)
                else:
                    ok_files[unique_id] = (version_number, self.local_path + query_base_path + f, True)

        if verbose:
            if ok_files:
                print('%d local matches with highest version %f' % (len(ok_files), max([v[0] for v in list(ok_files.values())])))
            else:
                print('No local matches')

        # Find remote matches
        if self.download:
            index_path = self.local_path + query_base_path + '.remote_index.html'
            remote_path = self.remote_path + query_base_path

            update_index = True
            if os.path.exists(index_path):
                age = py_time.time() - os.path.getmtime(index_path)
                if age < self.update_interval:
                    update_index = False

            if update_index:
                try:
                    self._get_remote(remote_path, index_path)
                except IOError as e:
                    if verbose:
                        print('Index %s does not exist' % remote_path)

                    if ok_files:
                        raise RuntimeError("No remote index available, but local matches were found anyway. This should never happen.")

                    return []


            with open(index_path) as f:
                remote_files = self.index_parser.extract_links(f.read()) # without the remote + base path

            if not remote_files:
                raise IOError('No remote files found from index file')

            # inspect each file, remove if it doesn't match the query, or is not the most recent version
            for f in remote_files:
                tmp = self.current_re.match(f)
                if tmp:
                    unique_id, version_number = version_function(tmp.groups())

                    if check_time:
                        file_time = date_function(tmp.groups())
                        if (file_time < start_day) or (file_time > finish_day):
                            continue

                    if unique_id in ok_files:
                        if ok_files[unique_id][0] < version_number:
                            # if we are overwriting a local entry, we will also need to delete the original file
                            if ok_files[unique_id][2]:
                                files_to_delete.append(ok_files[unique_id][1])

                            ok_files[unique_id] = (version_number, f, False)

                    else:
                        ok_files[unique_id] = (version_number, f, False)


            if not cleanup:
                for k in list(ok_files.keys()):
                    f = ok_files[k]
                    fname = self.remote_path + query_base_path + f[1]
                    if not f[2]: # download remote file
                        try:
                            self._get_remote(fname,
                                self.local_path + query_base_path + f[1])
                        except IOError as e:
                            print('Error encountered - index may be out of date?')
                            raise

                        # Update the name with the local directory
                        ok_files[k] = (f[0],
                            self.local_path + query_base_path + f[1],f[2])
                        n_downloaded += 1

            if verbose:
                if ok_files:
                    print('%d remote matches with highest version %f' % \
                        (len(ok_files), max([v[0] for v in list(ok_files.values())])))
                else:
                    print('No remote matches')

        for f in files_to_delete:
            if verbose:
                print('Deleting ' + f)
            os.remove(f)
            n_deleted += 1

        if not silent:
            print('Query %s: Returning %d (DL: %d, DEL: %d)' %
                (query, len(ok_files), n_downloaded, n_deleted))

        return [f[1] for f in list(ok_files.values())]
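A usage sketch mirroring the docstring above; maven_http_manager stands in for a configured manager instance (other examples in this listing access it as sdc_interface.maven_http_manager), and the query path is the one used in the docstring:

files = maven_http_manager.query(
    'sci/lpw/l2/2015/01/mvn_lpw_l2_lpnt_*_v*_r*.cdf',
    version_function=lambda x: (x[0], float(x[1]) + float(x[2]) / 100.),
    date_function=lambda x: yyyymmdd_to_spiceet(x[0]),
    start=celsius.spiceet('2015-01-01T00:00'),
    finish=celsius.spiceet('2015-02-01T00:00'),
)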