Example 1
def queue_writer():
    global queue
    # Append each queued item to the workflow output file as it arrives.
    # (Assumes SC_DATA_DIR ends with a trailing slash.)
    fh = open(os.getenv("SC_DATA_DIR") + "mex/ais_workflow_output.txt", "w")
    try:
        while True:
            g = queue.get()
            fh.write(str(celsius.utcstr(celsius.now())) + ": " + str(g) + "\n")
            fh.flush()
            queue.task_done()
    finally:
        fh.close()
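A minimal usage sketch, assuming it runs in the same module so the global name queue (a queue.Queue instance, as the global statement implies) is shared: the writer runs on a daemon thread while producers enqueue items.

import queue as queue_module
import threading

queue = queue_module.Queue()

threading.Thread(target=queue_writer, daemon=True).start()
queue.put("workflow started")   # producers enqueue items
queue.join()                    # block until every item has been logged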
Example 2
def load_kernels(time=None, force=False, verbose=False,
                load_all=False, keep_previous=False):
    """Load spice kernels, with a stateful thing to prevent multiple calls"""
    last_spice_time_window = getattr(spiceypy,
            'last_spice_time_window', 'MVN:NONE')

    if load_all:
        # Launch to now + 10 yrs
        start = celsius.spiceet("2013-11-19T00:00")
        finish = celsius.spiceet(celsius.now() + 10.*86400.*365.)
        time = (start, finish)  # feed the full interval through the logic below

    if time is None:
        start = None
        finish = None
        start_str = 'NO_TIME_SET'
        finish_str = ''
        start_int = -999999
        finish_int = -999999
    else:
        if hasattr(time, '__len__'):
            start = time[0]
            finish = time[-1]

        else:
            start = time
            finish = time
        start_str = celsius.utcstr(start, 'ISOC')
        finish_str = celsius.utcstr(finish, 'ISOC')
        start_int = int(start_str[2:4] + start_str[5:7] + '01')
        finish_int = int(finish_str[2:4] + finish_str[5:7] + '01')
        start_str = '%06d' % start_int
        finish_str = '%06d' % finish_int

    this_spice_time_window = start_str + finish_str

    if 'NONE' not in last_spice_time_window:
        # The stored window carries the 'MVN:' prefix, so compare against that:
        if last_spice_time_window == 'MVN:' + this_spice_time_window:
            if verbose:
                print('LOAD_KERNELS [MVN]: Interval unchanged')
            return

        if keep_previous:
            if verbose:
                print('LOAD_KERNELS [MVN]: Keeping loaded kernels')
            return

    spiceypy.last_spice_time_window = 'MVN:' + this_spice_time_window

    spiceypy.kclear()

    try:
        kernel_directory = os.getenv('MAVEN_KERNEL_DIR')
        if verbose:
            print('LOAD_KERNELS [MVN]: Registering kernels:')

        for k in REQUIRED_KERNELS:

            if '*' in k:
                files = glob(kernel_directory + k)
                m = -1
                file_to_load = ''
                for f in files:
                    t = os.path.getmtime(f)
                    if t > m:
                        m = t
                        file_to_load = f
                if verbose:
                    print(file_to_load)
                if file_to_load:
                    spiceypy.furnsh(file_to_load)
                else:
                    raise IOError("No match for %s" % k)

            else:
                spiceypy.furnsh(kernel_directory + k)
                if verbose: print(kernel_directory + k)

        # time sensitive kernels
        load_count = 0

        # used to determine whether or not to load the most recent, unnumbered
        # rolling update kernel
        max_encountered = -99999

        if start_int > -999999:
            # Load time-sensitive kernels
            for f in iglob(kernel_directory + 'spk/maven_orb_rec_*.bsp'):
                this_start = int(f.split('_')[3])
                this_finish = int(f.split('_')[4])
                if this_finish < start_int: continue
                if this_start > finish_int: continue
                spiceypy.furnsh(f)
                load_count += 1
                if verbose: print(f)

                if this_start > max_encountered: max_encountered = this_start
                if this_finish > max_encountered: max_encountered = this_finish

            if max_encountered < finish_int:
                # load the rolling-update kernel too
                f = kernel_directory + 'spk/maven_orb_rec.bsp'
                spiceypy.furnsh(f)
                load_count += 1
                if verbose: print(f)

            if load_count == 0:
                raise IOError("No kernels matched for time period")

    except Exception:
        spiceypy.kclear()
        spiceypy.last_spice_time_window = 'MVN:NONE_ERROR'
        raise

    print('LOAD_KERNELS [MVN]: Loaded %s' % spiceypy.last_spice_time_window)
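A hedged usage sketch: pass a (start, finish) pair of SPICEET values; an immediate second call with the same interval returns early thanks to the cached last_spice_time_window attribute.

start = celsius.spiceet("2015-03-01T00:00")
finish = celsius.spiceet("2015-03-08T00:00")
load_kernels(time=(start, finish), verbose=True)  # registers kernels
load_kernels(time=(start, finish), verbose=True)  # 'Interval unchanged'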
Example 3
def load_ngims_l2(start, finish, kind='ion', species='all',
        http_manager=None, delete_others=True, cleanup=False, verbose=None):
    kind = kind.lower()

    if not delete_others:
        raise RuntimeError("Not written yet")

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    if species == 'all':
        species = list(ION_MASSES.keys())

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'ngi/l2/%04d/%02d/mvn_ngi_l2_%s-abund-*_v05_r*.csv' % \
                                            (year, month, kind),
                    start=start, finish=finish,
                    version_function=lambda x: (x[0], 5. + float(x[1])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(
                        x[0].split('_')[1]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('NGIMS L2 cleanup complete')
        return

    if not files:
        raise IOError("No NGIMS data found")

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)

    if kind == 'ion':
        output = {'time':None, 'total':None}
        for s in species:
            output[s] = None

        for f in sorted(files):
            if verbose:
                print(f)
            d = np.loadtxt(f, skiprows=1, delimiter=',', usecols=(0,12,14),
                    converters={0: lambda x: celsius.spiceet(x),
                                7: lambda x: float(x or 'NaN'),
                                9: lambda x: float(x or 'NaN')}).T
            count = None

            for s in species:
                mass = ION_MASSES[s]
                inx, = np.where(d[1] == mass)
                if count is None:
                    count = inx.size
                else:
                    if count != inx.size:
                        raise ValueError("Malformed file?")

                if output[s] is None:
                    output[s] = d[2,inx]
                else:
                    output[s] = np.hstack((output[s], d[2,inx]))

            if output['time'] is None:
                output['time'] = d[0, inx]
            else:
                output['time'] = np.hstack((output['time'], d[0, inx]))

    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    # NGIMS times are already SPICEET via the loadtxt converter; no epoch shift needed.

    return output
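A usage sketch; the valid species names are whatever ION_MASSES defines, so a key is taken from there rather than hard-coded:

start = celsius.spiceet("2015-04-01T00:00")
finish = celsius.spiceet("2015-04-03T00:00")
ngims = load_ngims_l2(start, finish, kind='ion')   # species='all' by default
s = list(ION_MASSES.keys())[0]
plt.plot(ngims['time'], ngims[s])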
Example 4
def load_kernels(time=None, force=False, verbose=False,
                load_all=False, keep_previous=False):
    """Load spice kernels, with a stateful thing to prevent multiple calls"""
    # global last_spice_time_window
    last_spice_time_window = getattr(spiceypy, 'last_spice_time_window', 'MEX:NONE')

    if load_all:
        # Launch to now + 10 yrs
        start = celsius.spiceet("2003-06-01T00:00")
        finish = celsius.spiceet(mex.now() + 10.*86400.*365.)
        time = (start, finish)  # feed the full interval through the logic below

    if time is None:
        start = None
        finish = None
        start_str = 'NO_TIME_SET'
        finish_str = ''
        start_int = -999999
        finish_int = -999999
    else:
        if hasattr(time, '__len__'):
            start = time[0]
            finish = time[-1]

        else:
            start = time
            finish = time
        start_str = celsius.utcstr(start, 'ISOC')
        finish_str = celsius.utcstr(finish, 'ISOC')
        start_int = int(start_str[2:4] + start_str[5:7] + '01')
        finish_int = int(finish_str[2:4] + finish_str[5:7] + '01')
        start_str = '%06d' % start_int
        finish_str = '%06d' % finish_int

    this_spice_time_window = start_str + finish_str

    if 'NONE' not in last_spice_time_window:
        # The stored window carries the 'MEX:' prefix, so compare against that:
        if last_spice_time_window == 'MEX:' + this_spice_time_window:
            if verbose:
                print('LOAD_KERNELS: Interval unchanged')
            return

        if keep_previous:
            if verbose:
                print('LOAD_KERNELS: Keeping loaded kernels')
            return

    spiceypy.last_spice_time_window = 'MEX:'+this_spice_time_window

    spiceypy.kclear()

    try:
        kernel_directory = mex.data_directory + 'spice'
        if verbose:
            print('LOAD_KERNELS: Registering kernels:')

        for k in REQUIRED_KERNELS:

            if '*' in k:
                files = glob.glob(kernel_directory + k)
                m = -1
                file_to_load = ''
                for f in files:
                    t = os.path.getmtime(f)
                    if t > m:
                        m = t
                        file_to_load = f
                if verbose:
                    print(file_to_load)
                if not file_to_load:
                    raise IOError("No matches found for %s" % k)
                spiceypy.furnsh(file_to_load)

            else:
                spiceypy.furnsh(kernel_directory + k)
                if verbose: print(kernel_directory + k)

        if start_int > -999999:
            # Load time-sensitive kernels
            for f in glob.iglob(kernel_directory + '/spk/ORMM_T19_*.BSP'):
                this_int = int(f.split('_T19_')[1][:6])
                if this_int < start_int: continue
                if this_int > finish_int: continue
                spiceypy.furnsh(f)
                if verbose: print(f)

    except Exception:
        spiceypy.kclear()
        spiceypy.last_spice_time_window = 'MEX:NONE_ERROR'
        raise
Example 5
def plot_aspera_els(start, finish=None, verbose=False, ax=None, colorbar=True,
                        vmin=None, vmax=None, cmap=None, safe=True):
    """docstring for plot_aspera_els"""
    if cmap is None:
        cmap = plt.cm.Spectral_r

    if ax is None:
        ax = plt.gca()
    plt.sca(ax)

    if finish is None:
        finish = start + 86400.

    if vmin is None:
        vmin = 5.

    if vmax is None:
        vmax = 9.

    no_days = (finish - start) / 86400.

    if verbose:
        print('Plotting ASPERA/ELS between %s and %s...' % (
            celsius.utcstr(start, 'ISOC'), celsius.utcstr(finish, 'ISOC')))

    directory = mex.data_directory + 'aspera/els/'

    all_files_to_read = []

    for et in np.arange(start - 10., finish + 10., 86400.):
        dt = celsius.spiceet_to_datetime(et)
        f_name = directory + 'MEX_ELS_EFLUX_%4d%02d%02d_*.cef' % (dt.year, dt.month, dt.day)
        all_day_files = glob.glob(f_name)
        if not all_day_files:
            if verbose: print("No files matched %s" % f_name)
        else:
            all_files_to_read.extend(all_day_files)

    success = False
    all_extents = []
    for f_name in all_files_to_read:
        try:
            # Find energy bins:
            with open(f_name, 'r') as f:
                line_no = 0
                while line_no < 43:
                    line_no += 1
                    line = f.readline()
                    if 'DATA = ' in line:
                        energy_bins = np.fromstring(line[7:], sep=',')
                        energy_bins.sort()
                        break
                else:
                    raise IOError("No ENERGY_BINS info found in header")

            data = np.loadtxt(f_name, skiprows = 43, converters={1:lambda x: celsius.utcstr_to_spiceet(x[:-1])})

            if data.shape[1] != (energy_bins.shape[0] + 2):
                raise ValueError("Size of ENERGY_BINS and DATA doesn't match")

            # Check timing:
            dt = np.diff(data[:,1])
            spacing = np.median(dt)
            # NOTE: checks for non-constant spacing (and the 'safe' bail-out)
            # are disabled; the median step is used as the nominal spacing.

            # Interpolate to constant spacing:
            n_records = int((data[-1,1] - data[0,1]) / spacing)
            new_data = np.full((n_records, data.shape[1]), np.nan)
            new_data[:,1] = np.linspace(data[0,1], data[-1,1], n_records)
            for i in range(3, data.shape[1]):
                new_data[:,i] = np.interp(new_data[:,1],data[:,1], data[:,i], left=np.nan, right=np.nan)

            data = new_data

            extent = (data[0,1], data[-1,1], energy_bins[0], energy_bins[-1])

            if (extent[0] > finish) or (extent[1] < start):
                if verbose:
                    print("This block not within plot range - dumping")
                continue

            all_extents.append(extent)
            if verbose:
                print('Plotting ASPERA ELS block, Time: %s - %s, Energy: %f - %f' % (
                                celsius.utcstr(extent[0],'ISOC'), celsius.utcstr(extent[1],'ISOC'),
                                extent[2], extent[3]))
                print('Shape = ', data.shape)

            plt.imshow(np.log10(data[:,3:].T), interpolation="nearest", aspect='auto', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)
            success = True
        except IOError as e:
            if verbose:
                print('Error reading %s' % f_name)
                print('--', e)
            continue

    if success:
        plt.xlim(start, finish)
        plt.ylim(max([e[2] for e in all_extents]), min([e[3] for e in all_extents]))
        celsius.ylabel('E / eV')
        plt.yscale('log')
        cmap.set_under('w')
        if colorbar:
            old_ax = plt.gca()
            plt.colorbar(cax=celsius.make_colorbar_cax(), cmap=cmap,
                         ticks=[5,6,7,8,9])
            plt.ylabel(r'log$_{10}$ D.E.F.')
            plt.sca(old_ax)
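A usage sketch, assuming the MEX ELS .cef files are present under mex.data_directory; finish defaults to one day after start:

start = celsius.spiceet("2005-01-01T00:00")
plt.figure()
plot_aspera_els(start, verbose=True)
plt.show()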
Example 6
def load_static_l2(start, finish, kind='c0',
        http_manager=None, delete_others=True, cleanup=False, verbose=None):
    kind = kind.lower()

    full_kind = STATIC_PRODUCTS[kind]

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'sta/l2/%04d/%02d/mvn_sta_l2_%s_*_v*_r*.cdf' % \
                                            (year, month, full_kind),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('static L2 Cleanup complete')
        return

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)
    # NOTE: handling for kind == 'c6' (time / eflux / energy / mass arrays)
    # was drafted but is not implemented yet.
    if kind == 'c0':
        t0 = celsius.spiceet("1970-01-01T00:00")
        output = {'blocks':[], 'static_kind':'c0'}
        for f in sorted(files):
            c = pycdf.CDF(f)

            data = np.array(c['eflux'])
            last_ind = None
            last_block_start = None
            N = data.shape[0]

            for i in range(N):

                if last_ind is None:
                    last_ind = c['swp_ind'][i]
                    last_block_start = i

                changed = (c['swp_ind'][i] != last_ind)
                # Close the block when the sweep index changes, or at the end:
                if changed or (i == N - 1):

                    # Block covers rows last_block_start..stop-1 inclusive:
                    stop = i if changed else i + 1
                    img = data[last_block_start:stop, :, :].sum(axis=1)
                    extent = (
                            c['time_unix'][last_block_start] + t0,
                            c['time_unix'][stop-1] + t0,
                            c['energy'][0, -1, last_ind],
                            c['energy'][0, 0, last_ind],
                        )
                    output['blocks'].append((extent, img.T[::-1,:]))
                    # Restart accumulation from the current row:
                    last_ind = c['swp_ind'][i]
                    last_block_start = i

            c.close()


    else:
        raise ValueError("Input kind='%s' not recognized" % kind)


    return output
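Each element of output['blocks'] is an (extent, image) pair, so a plotting sketch (mirroring the commented-out imshow call above) is:

start = celsius.spiceet("2015-01-01T00:00")
finish = start + 86400.
d = load_static_l2(start, finish, kind='c0')
for extent, img in d['blocks']:
    plt.imshow(np.log10(img), extent=extent, origin='lower',
               interpolation='nearest', aspect='auto')
plt.xlim(start, finish)
plt.yscale('log')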
Example 7
File: kp.py Project: irbdavid/maven
def load_kp_data(start, finish, vars='ALL', truncate_time=True,
        http_manager=None, cleanup=False, verbose=None):
    """Reads MAVEN kp data into a structure.  Downloads / syncs if neccessary.
    Args:
        start, finish: SPICEET times
        vars: variable names to store (not implemented - default ALL)
        http_manager: connection to use
        cleanup: if True, no data will be downloaded or returned, and instead
            only superseded local files will be deleted
        verbose: locally overrides http_manager.verbose
        truncate_time: slice out only those points between start and finish,
            or return whole days if False

    Returns:
        results of kp_read_files for the located files
    """

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'kp/insitu/%04d/%02d/mvn_kp_insitu_*_v*_r*.tab' % \
                                            (year, month),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('KP cleanup complete')
        return

    if not files:
        raise IOError("No KP data found")

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)

    data = kp_read_files(files)

    if truncate_time:
        inx, = np.where((data.time > start) & (data.time < finish))
        for k in list(data.keys()):
            if k not in ('time', 'descriptions'):
                for kk in list(data[k].keys()):
                    data[k][kk] = data[k][kk][inx]

    data['time'] = data['spacecraft']['time'] # a link

    return data
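A usage sketch; 'spacecraft' is one of the sub-structures built by kp_read_files, though 'altitude' is an assumed field name within it:

start = celsius.spiceet("2015-01-01T00:00")
finish = start + 86400.
data = load_kp_data(start, finish)
plt.plot(data['time'], data['spacecraft']['altitude'])  # 'altitude' is assumed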
Example 8
def load_swia_l2_summary(start, finish, kind='onboardsvymom',
        http_manager=None, delete_others=True, cleanup=False, verbose=None):
    kind = kind.lower()

    if not delete_others:
        raise RuntimeError("Not written yet")

    if http_manager is None:
        http_manager = sdc_interface.maven_http_manager

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'swi/l2/%04d/%02d/mvn_swi_l2_%s_*_v*_r*.cdf' % \
                                            (year, month, kind),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('SWIA L2 Cleanup complete')
        return

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)


    if kind == 'onboardsvyspec':
        output = {'time':None, 'def':None}
        for f in sorted(files):
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['def']  = np.array(c['spectra_diff_en_fluxes']).T

                # Some weird formatting here:
                output['energy']  = np.array(
                    [c['energy_spectra'][i] for i in range(c['energy_spectra'].shape[0])]
                )
                output['energy'] = output['energy'][::-1]
            else:
                output['time'] = np.hstack((output['time'],
                                    np.array(c['time_unix'])))
                output['def'] = np.hstack((output['def'],
                                    np.array(c['spectra_diff_en_fluxes']).T))

                if output['energy'].shape != c['energy_spectra'].shape:
                    raise ValueError("Energy range has changed!")

            c.close()
        output['def'] = output['def'][::-1,:]

    elif kind == 'onboardsvymom':
        output = {'time':None, 'velocity':None, 'density':None,
                                'temperature':None, 'quality_flag':None}
        for f in sorted(files):
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['quality_flag'] = np.array(c['quality_flag'])
                output['density'] = np.array(c['density'])
                output['velocity'] = np.array(c['velocity']).T
                output['temperature'] = np.array(c['temperature']).T
            else:
                merge_attrs(output, 'time', c, 'time_unix')
                merge_attrs(output, 'quality_flag', c)
                merge_attrs(output, 'density', c)

                merge_attrs(output, 'velocity', c, transpose=True)
                merge_attrs(output, 'temperature', c, transpose=True)

            c.close()

    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    output['time'] = output['time'] + celsius.spiceet("1970-01-01T00:00")

    return output
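A usage sketch covering both kinds handled above (field names as read from the CDFs):

start = celsius.spiceet("2015-01-01T00:00")
finish = start + 86400.
mom = load_swia_l2_summary(start, finish, kind='onboardsvymom')
plt.plot(mom['time'], mom['density'])
spec = load_swia_l2_summary(start, finish, kind='onboardsvyspec')
plt.pcolormesh(spec['time'], spec['energy'], np.log10(spec['def']))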
Example 9
def lpw_l2_load(start, finish, kind='lpnt', http_manager=None, cleanup=False,
                    verbose=None):
    """Finds and loads LPW L2 data"""

    if http_manager is None: http_manager = sdc_interface.maven_http_manager
    kind = kind.lower()

    t = start
    year, month = celsius.utcstr(t,'ISOC').split('-')[0:2]
    year = int(year)
    month = int(month)
    #  Each month:
    files = []
    while t < finish:
        files.extend(
                http_manager.query(
                    'lpw/l2/%04d/%02d/mvn_lpw_l2_%s_*_v*_r*.cdf' % \
                                            (year, month, kind),
                    start=start, finish=finish,
                    version_function=\
                        lambda x: (x[0], float(x[1]) + float(x[2])/100.),
                    date_function=lambda x:
                                sdc_interface.yyyymmdd_to_spiceet(x[0]),
                    cleanup=cleanup, verbose=verbose
                )
            )
        month += 1
        if month > 12:
            month = 1
            year += 1
        t = celsius.spiceet('%d-%02d-01T00:00' % (year, month))

    if cleanup:
        print('LPW L2 cleanup complete')
        return

    if not files:
        raise IOError("No data found")

    for f in sorted(files):
        if not os.path.exists(f):
            raise IOError("%s does not exist" % f)


    if kind == 'lpnt':
        output = dict(time=None, ne=None, te=None, usc=None)
        for f in sorted(files):
            c = pycdf.CDF(f)
            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['ne'] = np.array(c['data'][:,0])
                output['te'] = np.array(c['data'][:,1])
                output['usc'] = np.array(c['data'][:,2])
            else:
                output['time'] = np.hstack((output['time'],
                    np.array(c['time_unix'])))

                for v, i in zip(('ne', 'te', 'usc'), (0,1,2)):
                    output[v] = np.hstack((output[v], np.array(c['data'][:,i])))
            c.close()

    elif kind == 'wn':
        output = dict(time=None, ne=None)
        for f in sorted(files):
            print(f)
            c = pycdf.CDF(f)
            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['ne'] = np.array(c['data'])
            else:
                output['time'] = np.hstack((output['time'],
                    np.array(c['time_unix'])))
                output['ne'] = np.hstack((output['ne'],
                    np.array(c['data'])))

            c.close()


    elif kind == 'wspecact':
        output = dict(time=None, spec=None, freq=None)
        for f in sorted(files):
            print(f)
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['spec'] = np.array(c['data']).T
                output['freq'] = np.array(c['freq'][0,:])
            else:
                output['time'] = np.hstack((output['time'],
                                np.array(c['time_unix'])))
                output['spec'] = np.hstack((output['spec'],
                                np.array(c['data']).T))
            c.close()

        # NOTE: spectra output is not interpolated!

    elif kind == 'wspecpas':
        output = dict(time=None, spec=None, freq=None)
        for f in sorted(files):
            print(f)
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['spec'] = np.array(c['data']).T
                output['freq'] = np.array(c['freq'][0,:])
            else:
                output['time'] = np.hstack((output['time'],
                                np.array(c['time_unix'])))
                output['spec'] = np.hstack((output['spec'],
                                np.array(c['data']).T))
            c.close()
        # NOTE: spectra output is not interpolated!

    elif kind == 'lpiv':
        output = dict(time=None, current=None, volt=None)
        for f in sorted(files):
            c = pycdf.CDF(f)

            if output['time'] is None:
                output['time'] = np.array(c['time_unix'])
                output['current'] = [np.array(c['data']).T]
                output['volt'] = [np.array(c['volt']).T]
            else:
                output['time'] = np.hstack((output['time'],
                                np.array(c['time_unix'])))
                output['current'].append(np.array(c['data']).T)
                output['volt'].append(np.array(c['volt']).T)

            c.close()

    else:
        raise ValueError("Input kind='%s' not recognized" % kind)

    output['time'] = output['time'] + celsius.spiceet("1970-01-01T00:00")
    return output
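A usage sketch for the lpnt product (time, ne, te, usc as read above):

start = celsius.spiceet("2015-01-01T00:00")
finish = start + 86400.
lp = lpw_l2_load(start, finish, kind='lpnt')
plt.plot(lp['time'], lp['ne'])
plt.plot(lp['time'], lp['te'])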
Example 10
    def set_ionogram(self, ionogram, update=True, auto=None):

        if auto is None:
            auto = self.auto

        if not isinstance(ionogram, ais.Ionogram):
            if isinstance(ionogram, str):
                if ionogram.lower() == 'next':
                    ig_inc = 1
                elif ionogram.lower() == 'previous':
                    ig_inc = -1
                else:
                    raise ValueError("Unrecognized ionogram '%s'" % ionogram)
            else:
                ig_inc = ionogram

            for i, ig in enumerate(self.ionogram_list):
                if ig is self.current_ionogram:
                    if ((i + ig_inc) >= 0) and ((i + ig_inc) < len(self.ionogram_list)):
                        ionogram = self.ionogram_list[i+ig_inc]
                    else:
                        self.set_orbit(self.orbit + int(ig_inc / abs(ig_inc)), strict=False)

        if ionogram is not self.current_ionogram:
            if not self.digitization_saved():
                # print "Current digitization not saved"
                self.save_current_digitization()
            self.current_ionogram = ionogram

            plt.close(self.fp_local_figure_number)
            plt.close(self.td_cyclotron_figure_number)

            # Try to load from DB, otherwise set up an empty one:
            dig = self.digitization_db.get_nearest(ionogram.time)
            self.current_ionogram.digitization = dig

            if not dig:
                dig = IonogramDigitization()
                dig.time = self.current_ionogram.time
                self.current_ionogram.digitization = dig
                if auto:
                    self.auto_fit(update=update)
                    self._digitization_saved = False
            else:
                # We loaded something, fresh from the DB and therefore:
                self._digitization_saved = True

            self.selected_plasma_lines = []
            self.selected_cyclotron_lines = []

            plt.sca(self.ig_ax)
            plt.cla()
            plt.sca(self.ne_ax)
            plt.cla()

            ig_index = 0
            test_ig = self.ionogram_list[0]
            while test_ig != self.current_ionogram:
                ig_index += 1
                test_ig = self.ionogram_list[ig_index]

            self.message("Set ionogram to %s [%d/%d]" % (
                celsius.utcstr(1. * self.current_ionogram.time, format='C'),
                ig_index,
                len(self.ionogram_list)))

            if update:
                self.set_status(None)
                self.update()
            return self
Example 11
def get_densities(start, finish=None, verbose=False, sweeps=True,
                cleanup=False):
    """Routine to extract Dave's own processed densities"""
    if finish is None: finish = start + 86400. - 1.

    if start > finish: raise ValueError("Start %f exceeds finish %f" % (start, finish))

    directory = os.getenv("SC_DATA_DIR", os.path.expanduser("~/data/"))
    directory += 'maven/ping/'

    t = start
    chunks = []
    while t < finish:
        try:
            date = celsius.utcstr(t, 'ISOC')[:10]
            fname = directory + date[:4] + '/' + date + '.sav'
            tmp = readsav(fname)
            if 'sza' not in tmp:
                print(fname + ' out of date, skipping')
                t += 86400.
                continue

            n = len(tmp['time'])
            malformed = False
            for k in ('sza', 'density', 'flag'):
                if tmp[k].shape[-1] != n:
                    print('Malformed ', fname)
                    malformed = True
                    break
            if malformed:
                t += 86400.
                continue

            chunks.append(tmp)
            if verbose:
                print(fname + ', ' +  str(len(chunks[-1]['time'])))

        except IOError as e:
            if verbose:
                print("Missing: " + fname)

        t += 86400.

    if not chunks:
        print('No data found')
        return chunks

    banned_keys = ('probe', 'spec', 'spec_f', 'iv1', 'iv2') # The spectra are not retained

    output = {}
    for k in list(chunks[0].keys()):
        if k in banned_keys: continue
        output[k] = np.hstack([c[k] for c in chunks])
    assert output['sza'].shape == output['time'].shape  # sanity check

    if sweeps:
        for k in ('iv1', 'iv2'):
            output[k] = {}
            for kk in chunks[0][k].dtype.names:
                if kk.lower() in banned_keys: continue
                output[k][kk.lower()] = np.hstack([c[k][kk][0] for c in chunks])

    assert output['sza'].shape == output['time'].shape  # sanity check
    inx, = np.where((output['time'] > start) & (output['time'] < finish))
    for k in list(output.keys()):
        if k in banned_keys: continue
        output[k] = output[k][...,inx]

    inx = np.argsort(output['time'])
    for k in list(output.keys()):
        if k in banned_keys: continue
        output[k] = output[k][...,inx]

    if sweeps:
        for k in ('iv1', 'iv2'):
            inx, = np.where((output[k]['time'] > start) & (output[k]['time'] < finish))
            for kk in list(output[k].keys()):
                output[k][kk] = output[k][kk][inx]
            inx = np.argsort(output[k]['time'])
            for kk in list(output[k].keys()):
                output[k][kk] = output[k][kk][inx]

    if cleanup:
        print("Cleaning up some timing error - any negative time steps being erased")
        dt = np.diff(output['time'])
        inx, = np.where(dt < 0.)
        if inx.size != 0:
            for k in list(output.keys()):
                if k in banned_keys: continue
                output[k][inx+1] *= np.nan

        if sweeps:
            for k in ('iv1', 'iv2'):
                inx, = np.where(np.diff(output[k]['time']) < 0.)
                if inx.size != 0:
                    for kk in list(output[k].keys()):
                        output[k][kk][inx + 1] *= np.nan

    return output
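A usage sketch; finish defaults to one day after start, and the 'iv1'/'iv2' sweep sub-dictionaries are only present when sweeps=True:

d = get_densities(celsius.spiceet("2015-06-01T00:00"), verbose=True)
if d:
    plt.plot(d['time'], d['density'])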
Example 12
    def __init__(self, orbit, start_time=None, finish_time=None, debug=False,
                    fig=None, db_filename=None, marker_size=4.,
                    verbose=False, vmin=None, vmax=None):
        super(AISReview, self).__init__()

        self.orbit = orbit
        self.verbose = verbose

        self.debug = debug
        self.fig = fig

        self.calibrator = ais_code.AISEmpiricalCalibration()

        self.mex_orbit = mex.orbits[self.orbit]
        self.start_time = self.mex_orbit.periapsis - 200. * \
                        ais_code.ais_spacing_seconds
        self.finish_time = self.mex_orbit.periapsis + 200. * \
                        ais_code.ais_spacing_seconds

        if start_time:
            self.start_time = start_time

        if finish_time:
            self.finish_time = finish_time

        if vmin is None:
            vmin = ais_code.ais_vmin

        if vmax is None:
            vmax = ais_code.ais_vmax

        self.vmin = vmin
        self.vmax = vmax

        self.marker_size = marker_size
        self.cbar_ticks = np.arange(-16,-8, 2)

        # Don't keep the db active
        if db_filename is not None:
            self.digitization_list = ais_code.DigitizationDB(filename=db_filename,
                            verbose=self.verbose).get_all()
        else:
            self.digitization_list = ais_code.DigitizationDB(orbit=self.orbit,
                                verbose=self.verbose).get_all()

        if self.digitization_list:
            self._newest = celsius.utcstr(float(max(
                d.timestamp for d in self.digitization_list)), format='C')[:-9]
            self._oldest = celsius.utcstr(float(min(
                d.timestamp for d in self.digitization_list)), format='C')[:-9]
            print("%d digitizations loaded, produced between: %s and %s" % (
                len(self.digitization_list), self._oldest, self._newest))
        else:
            print("No digitizations loaded :(")
            self._newest = np.nan
            self._oldest = np.nan

        self.ionogram_list = ais_code.read_ais(self.orbit)

        for i in self.ionogram_list:
            i.interpolate_frequencies()

        no_linear_frequencies = self.ionogram_list[0].data.shape[1]


        self.extent = [self.start_time,
                       self.finish_time,
                       min(self.ionogram_list[0].frequencies) / 1.0E6,
                       max(self.ionogram_list[0].frequencies) / 1.0E6 ]

        s0 = self.mex_orbit.periapsis
        print(self.extent[0] - s0, self.extent[1] - s0)
        print(self.ionogram_list[0].time - s0, self.ionogram_list[-1].time - s0)
        print(min([i.time for i in self.ionogram_list]) - s0, max([i.time for i in self.ionogram_list]) - s0)


        if self.ionogram_list[0].time < self.extent[0]:
            print('WARNING: Pre-extending plot range by %s seconds to cover loaded ionograms' % (
                            self.extent[0] - self.ionogram_list[0].time))
            self.extent[0] = self.ionogram_list[0].time

        if self.ionogram_list[-1].time > self.extent[1]:
            print('WARNING: Post-extending plot range by %s seconds to cover loaded ionograms' % (
                        self.ionogram_list[-1].time - self.extent[1]))
            self.extent[1] = self.ionogram_list[-1].time

        no_ionograms_expected = ((self.extent[1] - self.extent[0])
                                                        / ais_code.ais_spacing_seconds + 1)

        no_ionograms_expected = int(round(no_ionograms_expected))
        self.tser_arr_all = np.empty((ais_code.ais_number_of_delays, no_linear_frequencies,
            no_ionograms_expected))

        ilast = None
        empty_count = 0
        for i, ig in enumerate(self.ionogram_list):
            ignum = int( round((ig.time - (self.extent[0] + ais_code.ais_spacing_seconds)) / ais_code.ais_spacing_seconds ))
            if ignum > (no_ionograms_expected-1):
                raise mex.MEXException("Out of range %d, %d, %d"
                    % (len(self.ionogram_list), ignum, no_ionograms_expected))

            ig.interpolate_frequencies()
            self.tser_arr_all[:,:,ignum] = ig.data
            if ilast is not None:
                if (ignum != (ilast + 1)):
                    empty_count += 1
                    self.tser_arr_all[:,:,ilast+1:ignum-1] = -9E99
            ilast = ignum

        if empty_count:
            print('Found %d empty ionograms / missing data' % empty_count)
        errs = np.geterr()
        np.seterr(divide='ignore')
        self.tser_arr = np.log10(np.mean(self.tser_arr_all[::-1,:, :], axis=0))
        self.tser_arr_all = np.log10(self.tser_arr_all)
        np.seterr(**errs)

        # Trajectory info is generated via generate_position() below.

        self.field_model = mars.CainMarsFieldModel(nmax=60)
        self.generate_position()
        self.ionosphere_model = mars.Morgan2008ChapmanLayer()
Example 13
        print(field)

        plt.subplot(311)
        plt.plot(et, pos[0,:] - mex.mars_mean_radius_km)
        # plt.plot(et, p[0,:] - mex.mars_mean_radius_km)

        plt.subplot(312)
        plt.plot(et, pos[1,:])
        plt.plot(et, pos[2,:])

        plt.subplot(313)
        plt.plot(et, field[0,:], 'r-')
        plt.plot(et, field[1,:], 'g-')
        plt.plot(et, field[2,:], 'b-')
        plt.plot(et, np.sqrt(np.sum(field * field, axis=0)), 'k-')
        print(celsius.utcstr(et[0]))
        print(celsius.utcstr(et[-1]))
        plt.gca().xaxis.set_major_locator(celsius.SpiceetLocator())
        plt.gca().xaxis.set_major_formatter(celsius.SpiceetFormatter())

        if comparison == 'Duru':
            plt.ylim(-400,400)

        plt.show()

    if False:
        plt.figure(figsize=(8,6))
        lt = np.arange(90, -90, -1.)
        ln = np.arange(0, 360, 1.)
        lat, lon = np.meshgrid(lt, ln)
        r = np.zeros_like(lat.flatten()) +  mex.mars_mean_radius_km + 150.
Example 14
    def query(self, query, version_function=None, date_function=None,
                start=None, finish=None, cleanup=False, verbose=None,
                silent=None):
        """Takes a query, returns a list of local files that match.

Will first query the remote server, download missing files, and then delete
local files that match the query but are no longer present on the remote. The
implicit assumption here is that the remote directory is PERFECTLY maintained.

Args:
    query: query string with wildcards to locate a file on the remote server,
        e.g. 'sci/lpw/l2/2015/01/mvn_lpw_l2_lpnt_*_v*_r*.cdf'

    version_function: takes the expanded wildcards from the query, and converts
        them to a number used to compare versions and releases (higher=better).
        For example:
            lambda x: (x[0], float(x[1]) + float(x[2])/100.)
        to generate 1.02 for V1, R2 for the above query (2nd and 3rd wildcards)

    date_function: takes the expanded wildcards from the query, and converts to
        a date for the content of the file, for example:
            lambda x: yyyymmdd_to_spiceet(x[0])
        for the above query example.

    start: start date SPICEET, ignored if not set
    finish: finish date, ignored if not set. 'finish' must be set if 'start' is
        (can use np.inf, if you want)

Returns: List of local files, freshly downloaded if necessary, that satisfy the
    query supplied.
        """

        file_list = []
        split_query = query.split('/')
        query_base_path = '/'.join(split_query[:-1]) + '/'
        query_filename  = split_query[-1]

        if verbose is None: verbose = self.verbose
        if silent is None: silent = self.silent

        if version_function is None:
            # Parenthesized: without them this binds a (lambda, str) tuple.
            version_function = lambda x: (0, ''.join(x))

        self.current_re = re.compile(query_filename.replace("*", r"(\w*)"))

        if not os.path.exists(self.local_path + query_base_path):
            os.makedirs(self.local_path + query_base_path)

        check_time = False
        if start or finish: check_time = True

        if check_time and (date_function is None):
            raise ValueError("Start and finish are set, but date_function is not")

        if check_time:
            start_day = celsius.spiceet(celsius.utcstr(start, 'ISOC')[:10])
            finish_day = celsius.spiceet(celsius.utcstr(finish, 'ISOC')[:10]) \
                                + 86398. #1 day - 1s - 1 (possible) leap second


        # key: unique file id; value: (version_number, full name, is_local)
        ok_files = {}
        files_to_delete = []

        n_downloaded = 0
        n_deleted    = 0

        # Find local matches
        for f in os.listdir(self.local_path + query_base_path):
            tmp = self.current_re.match(f)
            if tmp:
                unique_id, version_number = version_function(tmp.groups())

                if check_time:
                    file_time = date_function(tmp.groups())
                    if (file_time < start_day) or (file_time > finish_day):
                        continue

                if unique_id in ok_files:
                    if ok_files[unique_id][0] < version_number:
                        ok_files[unique_id] = (version_number, self.local_path + query_base_path + f, True)
                else:
                    ok_files[unique_id] = (version_number, self.local_path + query_base_path + f, True)

        if verbose:
            if ok_files:
                print('%d local matches with highest version %f' % (len(ok_files), max([v[0] for v in list(ok_files.values())])))
            else:
                print('No local matches')

        # Find remote matches
        if self.download:
            index_path = self.local_path + query_base_path + '.remote_index.html'
            remote_path = self.remote_path + query_base_path

            update_index = True
            if os.path.exists(index_path):
                age = py_time.time() - os.path.getmtime(index_path)
                if age < self.update_interval:
                    update_index = False

            if update_index:
                try:
                    self._get_remote(remote_path, index_path)
                except IOError as e:
                    if verbose:
                        print('Index %s does not exist' % remote_path)

                    if ok_files:
                        raise RuntimeError("No remote index available, but local matches were found anyway. This should never happen.")

                    return []


            with open(index_path) as f:
                remote_files = self.index_parser.extract_links(f.read()) # without the remote + base path

            if not remote_files:
                raise IOError('No remote files found from index file')

            # inspect each file, remove if it doesn't match the query, or is not the most recent version
            for f in remote_files:
                tmp = self.current_re.match(f)
                if tmp:
                    unique_id, version_number = version_function(tmp.groups())

                    if check_time:
                        file_time = date_function(tmp.groups())
                        if (file_time < start_day) or (file_time > finish_day):
                            continue

                    if unique_id in ok_files:
                        if ok_files[unique_id][0] < version_number:
                            # if we are overwriting a local entry, we will also need to delete the original file
                            if ok_files[unique_id][2]:
                                files_to_delete.append(ok_files[unique_id][1])

                            ok_files[unique_id] = (version_number, f, False)

                    else:
                        ok_files[unique_id] = (version_number, f, False)


            if not cleanup:
                for k in list(ok_files.keys()):
                    f = ok_files[k]
                    fname = self.remote_path + query_base_path + f[1]
                    if not f[2]: # download remote file
                        try:
                            self._get_remote(fname,
                                self.local_path + query_base_path + f[1])
                        except IOError as e:
                            print('Error encountered - index may be out of date?')
                            raise

                        # Update the name with the local directory
                        ok_files[k] = (f[0],
                            self.local_path + query_base_path + f[1],f[2])
                        n_downloaded += 1

            if verbose:
                if ok_files:
                    print('%d remote matches with highest version %f' % \
                        (len(ok_files), max([v[0] for v in list(ok_files.values())])))
                else:
                    print('No remote matches')

        for f in files_to_delete:
            if verbose:
                print('Deleting ' + f)
            os.remove(f)
            n_deleted += 1

        if not silent:
            print('Query %s: Returning %d (DL: %d, DEL: %d)' %
                (query, len(ok_files), n_downloaded, n_deleted))

        return [f[1] for f in list(ok_files.values())]
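A sketch of a typical call, mirroring the docstring example:

start = celsius.spiceet("2015-01-01T00:00")
finish = celsius.spiceet("2015-02-01T00:00")
http_manager = sdc_interface.maven_http_manager
files = http_manager.query(
    'sci/lpw/l2/2015/01/mvn_lpw_l2_lpnt_*_v*_r*.cdf',
    version_function=lambda x: (x[0], float(x[1]) + float(x[2])/100.),
    date_function=lambda x: sdc_interface.yyyymmdd_to_spiceet(x[0]),
    start=start, finish=finish)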
Example 15
def plot_ima_spectra(
    start,
    finish,
    species=["H", "O", "O2"],
    colorbar=True,
    ax=None,
    blocks=None,
    check_times=True,
    return_val=None,
    verbose=False,
    check_overlap=True,
    vmin=2,
    vmax=7.0,
    raise_all_errors=False,
    cmap=None,
    norm=None,
    die_on_empty_blocks=False,
    accept_new_tables=True,
    inverted=True,
    **kwargs
):

    """ Plot IMA spectra from start - finish, .
    blocks is a list of data blocks to work from, if this is not specified then we'll read them
    species names: [only those with x will function]
        heavy (16, 85, 272) x
        sumions (85, 32)
        CO2 (16, 85, 272) x
        E (96, 1)
        Horig (16, 85, 272) x
        tmptime (1, 272)
        H (16, 85, 272)
        dE (1, 85)
        sumorigions (85, 32)
        O (16, 85, 272) x
        Hsp (16, 85, 272) x
        mass (50, 272)
        alpha (16, 85, 272) x
        O2 (16, 85, 272) x
        He (16, 85, 272) x

    for blocks read with dataset='fion', we'll add an 'f' before
            CO2, O2, O2plus, O, Horig, He, H
    automagically
    """

    if not blocks:
        blocks = read_ima(start, finish, verbose=verbose, **kwargs)

    if ax is None:
        ax = plt.gca()
    plt.sca(ax)
    ax.set_facecolor("lightgrey")  # set_axis_bgcolor was removed in matplotlib 2.2

    if not cmap:
        cmap = matplotlib.cm.Greys_r
    if not norm:
        norm = plt.Normalize(vmin, vmax, clip=True)

    ims = []

    last_finish = -9e99

    if blocks:
        if "fH" in list(blocks[0].keys()):  # blocks were read with dataset='fion'
            if isinstance(species, str):
                if species in ("O", "H", "He", "alpha", "Horig", "O2", "O2plus", "CO2"):
                    species = "f" + species
            else:
                new_species = []
                for s in species:
                    if s in ("O", "H", "He", "alpha", "Horig", "O2", "O2plus", "CO2"):
                        new_species.append("f" + s)
                species = new_species

    label = r"Flux / $cm^{-2}s^{-1}$"

    if inverted:
        # Use the reversed colormap; flip it once, up front, so that every
        # block is drawn with the same map:
        name = cmap.name
        if name.endswith("_r"):
            name = name[:-2]
        else:
            name = name + "_r"
        cmap = getattr(plt.cm, name)

    # Make sure we set the ylimits correctly by tracking min and max energies
    min_energy = 60000.0
    max_energy = -10.0

    for b in blocks:
        # min(abs()) to account for negative values in E table
        extent = [
            celsius.matlabtime_to_spiceet(b["tmptime"][0]),
            celsius.matlabtime_to_spiceet(b["tmptime"][-1]),
            min(abs(b["E"])),
            max(b["E"]),
        ]
        # Account for the varying size of the Energy table:
        if b["sumions"].shape[0] == 96:
            extent[2] = b["E"][-1]
            extent[3] = b["E"][0]
        elif b["sumions"].shape[0] == 85:  # revised energy table
            extent[2] = b["E"][-11]
            extent[3] = b["E"][0]
            if extent[2] < 0.0:
                raise ValueError("Energies should be positive - unrecognised energy table?")
        else:
            if accept_new_tables:
                extent[2] = np.min(np.abs(b["E"]))
                extent[3] = b["E"][0]
                print("New table:", extent[2], extent[3], b["E"][-1], b["E"][0], b["sumions"].shape[0])
            else:
                raise ValueError(
                    "Unrecognised energy table: E: %e - %e in %d steps?"
                    % (b["E"][-1], b["E"][-1], b["sumions"].shape[0])
                )

        if extent[2] < min_energy:
            min_energy = extent[2]

        if extent[3] > max_energy:
            max_energy = extent[3]

        if check_times:
            spacing = 86400.0 * np.mean(np.diff(b["tmptime"]))
            if spacing > 15.0:
                if raise_all_errors:
                    raise ValueError("Resolution not good? Mean spacing = %fs " % spacing)
                else:
                    plt.annotate(
                        "Resolution warning:\nmean spacing = %.2fs @ %s "
                        % (spacing, celsius.utcstr(np.median(b["tmptime"]))),
                        (0.5, 0.5),
                        xycoords="axes fraction",
                        color="red",
                        ha="center",
                    )

        if not isinstance(species, str):
            # produce the MxNx3 array for up to 3 species (in R, G, B)
            img = np.zeros((b[species[0]].shape[1], b[species[0]].shape[2], 3)) + 1.0
            for i, s in enumerate(species):
                im = np.sum(b[s], 0).astype(np.float64)

                if inverted:
                    im = 1.0 - norm(np.log10(im))
                    for j in (0, 1, 2):
                        if j != i:
                            img[..., j] *= im
                    tot = np.sum(1.0 - img)
                else:
                    img[..., i] *= norm(np.log10(im))
                    tot = np.sum(img)

        else:
            img = np.sum(b[species], 0)
            img[img < 0.01] += 0.1e-10
            img = np.log10(img)
            tot = np.sum(img)
        if verbose:
            print("Total scaled: %e" % tot)

        if check_overlap and (extent[0] < last_finish):
            raise ValueError("Blocks overlap: Last finish = %f, this start = %f" % (last_finish, extent[0]))

        if FUDGE_FIX_HANS_IMA_TIME_BUG:
            if abs(extent[0] - last_finish) < 20.0:  # if there's 20s or less between blocks
                if verbose:
                    print("\tFudging extent: Adjusting start by %fs" % (extent[0] - last_finish))
                extent[0] = last_finish  # squeeze them together


        if extent[1] < start:
            print("Dumping block (B)", start - extent[1])
            continue
        if extent[0] > finish:
            print("Dumping block (A)", extent[0] - finish)
            continue

        ims.append(plt.imshow(img, extent=extent, origin="upper", interpolation="nearest", cmap=cmap, norm=norm))

        last_finish = extent[1]

    if ims:
        plt.xlim(start, finish)
        plt.ylim(min_energy, max_energy)
        cbar_im = ims[0]
    else:
        plt.ylim(10.0, 60000)  # guess
        # invisible image for using with colorbar
        cbar_im = plt.imshow(np.zeros((1, 1)), cmap=cmap, norm=norm, visible=False)

    plt.yscale("log")
    celsius.ylabel("E / eV")

    if colorbar:
        # NOTE: a three-channel (RGB) colorbar for multi-species plots was
        # drafted here but disabled; a single scalar colorbar is used instead.
        ticks = np.arange(int(vmin), int(vmax) + 1, dtype=int)
        plt.colorbar(cbar_im, cax=celsius.make_colorbar_cax(), ticks=ticks).set_label(label)

    if return_val:
        if return_val == "IMAGES":
            del blocks
            return ims
        elif return_val == "BLOCKS":
            return blocks
        else:
            print("Unrecognised return_value = " + str(return_value))

    del blocks
    del ims
    return
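A usage sketch: read the blocks once, then reuse them for a second panel via the blocks argument (return_val="BLOCKS" hands them back):

start = celsius.spiceet("2005-01-01T00:00")
finish = start + 86400.
blocks = plot_ima_spectra(start, finish, species=["H", "O", "O2"],
                          return_val="BLOCKS", verbose=True)
plot_ima_spectra(start, finish, species="H", blocks=blocks, colorbar=False)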