Example #1
def main(arg):
    # unpack positional arguments (formerly keyword defaults:
    # dirnam_in='tst', ampl_in=35, res_in=0)
    ampl_in, res_in, dirnam_in, epos_in = arg

    print('dirnam_in', dirnam_in)
    print('epos_in', epos_in)

    if local == 0:
        data_pth = '/att/nobackup/sberton2/MLA/data/MLA_' + epos_in[:2]
        dataset = ''  # 'small_test/' #'test1/' #'1301/' #
        data_pth += dataset
        # load kernels
        spice.furnsh(
            '/att/nobackup/emazaric/MESSENGER/data/furnsh/furnsh.MESSENGER.def')
    else:
        # data_pth is needed below to locate the MLA datafiles on this branch too
        data_pth = '/home/sberton2/Works/NASA/Mercury_tides/data/'
        dataset = ''  # 'test/' # 'small_test/' #'1301/' #
        data_pth += dataset
        # load kernels
        spice.furnsh(auxdir + 'mymeta')

    if parallel:
        # set ncores
        ncores = mp.cpu_count() - 1  # 8
        print('Process launched on ' + str(ncores) + ' CPUs')

    # out = spice.getfov(vecopts['INSTID'][0], 1)
    # updated w.r.t. SPICE from Mike's scicdr2mat.m
    vecopts['ALTIM_BORESIGHT'] = [0.0022105, 0.0029215, 0.9999932892]  # out[2]
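    # (a unit vector: presumably the MLA boresight direction in the
    # spacecraft frame)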
    ###########################

    # generate list of epochs
    if new_illumNG:
        # read all MLA datafiles (*.TAB in data_pth) corresponding to the given time period
        allFiles = glob.glob(
            os.path.join(data_pth, 'MLAS??RDR' + epos_in + '*.TAB'))
        print("path+files")
        print(data_pth, epos_in)
        print(allFiles)

        # Prepare list of tracks
        tracknames = ['gtrack_' + fil.split('.')[0][-10:] for fil in allFiles]
        epo_in = []
        for track_id, infil in zip(tracknames, allFiles):
            track = gtrack(vecopts)
            track.prepro(infil)
            epo_in.extend(track.ladata_df.ET_TX.values)

        epo_in = np.sort(np.array(epo_in))
    else:
        epo0 = 410270400  # get as input parameter
        # epo_tx = np.array([epo0+i for i in range(86400*7)])
        subpnts = 10
        epo_tx = np.array([epo0 + i / subpnts for i in range(86400 * subpnts)])
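        # note: epo0 above is seconds past J2000 (~2013-01-01 ET), so epo_tx
        # samples one day of epochs at 1/subpnts = 0.1 s spacing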

    # pass to illumNG
    if local:
        if new_illumNG:
            np.savetxt("tmp/epo_mla_" + epos_in + ".in", epo_tx, fmt="%4d")
            print("Do you have all of illumNG predictions?")
            exit()
        # use your path; alternatives: '../aux/illumNG/' + epos_in + '_32ppd/',
        # 'sph_7d_mla/', '_1s/', 'sph/', 'grd/'
        path = '../../../aux/illumNG/sph'
        illumNGf = glob.glob(path + "/bore*")
    else:
        if new_illumNG:
            np.savetxt("tmp/epo_mla_" + epos_in + ".in", epo_in, fmt="%10.5f")
            print("illumNG call")
            if not os.path.exists("illumNG/"):
                print('*** create and copy required files to ./illumNG')
                exit()

            shutil.copy("tmp/epo_mla_" + epos_in + ".in",
                        '../_MLA_Stefano/epo.in')
            illumNG_call = subprocess.call(
                ['sbatch', 'doslurmEM', 'MLA_raytraces.cfg'],
                universal_newlines=True,
                cwd="../_MLA_Stefano/")  # illumNG/")
            for f in glob.glob("../_MLA_Stefano/bore*"):
                shutil.move(
                    f,
                    auxdir + '/illumNG/grd/' + epos_in + "_" + f.split('/')[1])
        path = auxdir + 'illumng/mlatimes_' + epos_in + '/'  # use your path
        print('illumng dir', path)
        illumNGf = glob.glob(path + "/bore*")

    # TODO: else, launch illumNG directly
    df = prepro_ilmNG(illumNGf)
    print('illumNGf', illumNGf)

    if apply_topo:
        # read and interpolate DEM
        # # open netCDF file
        # nc_file = "/home/sberton2/Works/NASA/Mercury_tides/MSGR_DEM_USG_SC_I_V02_rescaledKM_ref2440km_4ppd_HgM008frame.GRD"
        # sim_gtrack.dem_xr = xr.open_dataset(nc_file)

        # prepare surface texture "stamp" and assign the interpolated function as class attribute
        # persistence = 0.65 to fit power law of Steinbrugge 2018 over scales 50m (spot-size) to 200m (spots distance)
        np.random.seed(62)
        shape_text = 1024
        res_text = 2**res_in
        depth_text = 5
        size_stamp = 0.25
        amplitude = ampl_in
        noise = perlin2d.generate_periodic_fractal_noise_2d(
            amplitude, (shape_text, shape_text), (res_text, res_text),
            depth_text,
            persistence=0.65)
        interp_spline = RectBivariateSpline(
            np.array(range(shape_text)) / shape_text * size_stamp,
            np.array(range(shape_text)) / shape_text * size_stamp, noise)
        sim_gtrack.apply_texture = interp_spline
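
        # quick sanity check of the texture stamp (a hedged addition; assumes
        # the module-level `debug` flag imported with the other options):
        # evaluate the spline on a coarse grid and report its range
        if debug:
            xy = np.linspace(0., size_stamp, 64)
            stamp = interp_spline(xy, xy)
            print('texture stamp min/max:', stamp.min(), stamp.max())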


    if local:
        outdir_ = outdir + dirnam_in
    else:
        outdir_ = dirnam_in

    print("outdir = ", outdir_)

    os.makedirs(outdir_, exist_ok=True)

    # loop over all gtracks
    print('orbs = ', list(df.groupby('orbID').groups.keys()))
    args = ((sim_gtrack(vecopts, i), df, i, outdir_)
            for i in list(df.groupby('orbID').groups.keys()))

    if parallel and False:  # parallel branch disabled: incompatible with the grdtrack call
        # print((mp.cpu_count() - 1))
        pool = mp.Pool(processes=ncores)  # mp.cpu_count())
        _ = pool.map(sim_track, args)  # parallel
        pool.close()
        pool.join()
    else:
        _ = [sim_track(arg) for arg in args]  # seq
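
# Hedged usage sketch (added): how main() is presumably invoked, given the
# positional unpacking above; amplitude/resolution/dirname values follow the
# old signature comment (ampl_in=35, res_in=0, dirnam_in='tst'), while the
# epoch tag '1301' (January 2013) is an assumption.
if __name__ == '__main__':
    main((35, 0, 'tst', '1301'))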
Example #2
def extract_dRvsLAT(fil, file_dem):
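    # load one geolocated track, restrict it to a test box at high northern
    # latitudes, difference the ranges against the DEM, then fit the track to
    # the DEM; returns per-point residuals plus the A/C/R corrections
    # (presumably along-track/cross-track/radial)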
    track = gtrack(vecopts)

    track = track.load(fil)
    print("Processing track #", track.name)

    if SpInterp == 2:
        np.set_printoptions(suppress=True,
                            formatter={'float_kind': '{:0.3f}'.format})
        track.interpolate()
        # exit()

    df_data = track.ladata_df.copy()
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # restrict to a test box: channel 0, LAT 82-84 N, LON 120-137 E
    mask = ((df_data['LAT'] >= 82) & (df_data['LAT'] <= 84) &
            (df_data['chn'] == 0) &
            (df_data['LON'] >= 120) & (df_data['LON'] <= 137))
    df_data = df_data.loc[mask]

    if len(df_data) == 0:
        print("## fit2dem: no data in bounding box")
        return None

    lattmp = df_data.LAT.values
    lontmp = df_data.LON.values
    lontmp[lontmp < 0] += 360  # wrap longitudes to [0, 360)

    # read DEM radii at the footprints and convert (presumably km) to meters
    if file_dem.split('.')[-1] == 'GRD':  # grd/netCDF
        r_dem = get_demz_grd(file_dem, lontmp, lattmp) * 1.e3
    elif file_dem.split('.')[-1] == 'tif':  # USGS geotiff
        r_dem = np.squeeze(get_demz_tiff(file_dem, lontmp, lattmp)) * 1.e3
    else:
        # guard: r_dem would otherwise be undefined below
        raise ValueError("unsupported DEM format: " + file_dem)
    # alternative: r_dem = get_demz_at(dem_xarr, lattmp, lontmp) * 1.e3

    df_data.loc[:, 'altdiff_dem_data'] = df_data.R.values - r_dem
    dr_apriori = df_data.loc[:, 'altdiff_dem_data']

    # Fit track to DEM
    dr, dr_pre, ACRcorr, df_data = fit_track_to_dem(df_data, dem_file=file_dem)

    # update the track's previous-iteration solution
    coeff_set_re = ['sol_dR/dA', 'sol_dR/dC',
                    'sol_dR/dR']  # optionally also 'sol_dR/dRl', 'sol_dR/dPt'
    tmp = pd.DataFrame(ACRcorr).T
    tmp.columns = coeff_set_re
    track.sol_prev_iter = {'orb': tmp, 'glo': ''}

    print("Saving to ", fil)
    track.save(fil)


    return df_data[[
        'ET_TX', 'orbID', 'LAT', 'LON', 'dR_tid', 'dr_post', 'dr_pre'
    ]], ACRcorr
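
# Hedged usage sketch (added): extract_dRvsLAT is presumably mapped over the
# pickled gtrack files together with a DEM grid (paths illustrative):
# for fil in glob.glob(outdir + 'sim/.../0res_1amp/gtrack_13/gtrack_*.pkl'):
#     res = extract_dRvsLAT(fil, 'MSGR_DEM_USG_SC_I_V02_..._HgM008frame.GRD')
#     if res is not None:
#         df_res, acr_corr = res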
Example #3
import sys
sys.path.insert(0, '/home/sberton2/projects/Mercury_tides/PyXover_sim')

from glob import glob
import pandas as pd
import numpy as np
import seaborn as sns

from src.pygeoloc.ground_track import gtrack
from examples.MLA.options import vecopts, outdir
import matplotlib.pyplot as plt

if __name__ == '__main__':

    sim_test = "sph"

    track_sim = gtrack(vecopts)
    simf = glob("/att/nobackup/sberton2/MLA/data/SIM_13/"+sim_test+"/0res_1amp/MLASIMRDR13010107*.TAB")

    for f in simf:
        track_sim.read_fill(f)

    print(track_sim.ladata_df)


    track_real = gtrack(vecopts)
    reaf = glob("/att/nobackup/sberton2/MLA/data/MLA_13/MLASCIRDR13010107*.TAB")
    for f in reaf:
        track_real.read_fill(f)

    sim_df = track_sim.ladata_df.apply(pd.to_numeric, errors='ignore', downcast='float')
    # [1:] drops the first record (presumably a spurious first return)
    rea_df = track_real.ladata_df[1:].apply(pd.to_numeric, errors='ignore', downcast='float').reset_index()
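
    # Hedged continuation sketch (added): a typical comparison aligns the two
    # tracks on transmit epochs rounded to the ms (as done elsewhere in the
    # repo) and differences the ranges, assuming both frames expose ET_TX and R
    merged = pd.merge(sim_df.assign(ET=sim_df.ET_TX.round(3)),
                      rea_df.assign(ET=rea_df.ET_TX.round(3)),
                      on='ET', suffixes=('_sim', '_rea'))
    print((merged.R_sim - merged.R_rea).describe())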
Example #4
        # the local and non-local branches built identical paths, so no
        # conditional is needed here
        path = (outdir + 'sim/' + exp + '/' + rghn + '/gtrack_' +
                str(ym)[:2] + '/gtrack_' + str(ym) + '*.pkl')
        # path = '/home/sberton2/Works/NASA/Mercury_tides/out/sim/'+ spk + '/3res_20amp/gtrack_*' + '/' + '*.pkl'

        allFiles = np.sort(glob.glob(path))
        print(path)
        print("nfiles: ", len(allFiles))
        print(allFiles)

        prel_track = gtrack(vecopts)

        sol_df = []
        tid_df = []
        ACRcorr = []

        fit2dem = False

        if fit2dem:
            # loop over all gtracks
            if parallel:
                # print((mp.cpu_count() - 1))
                pool = mp.Pool(processes=mp.cpu_count())
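                # note: extract_dRvsLAT above takes (fil, file_dem); if that
                # signature applies here, bind the DEM first, e.g.
                #   from functools import partial
                #   tid_df = pool.map(partial(extract_dRvsLAT, file_dem=dem_file), allFiles)
                # (dem_file is illustrative)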
                tid_df = pool.map(extract_dRvsLAT, allFiles)  # parallel
                pool.close()
                pool.join()
Example #5
def launch_xov(args):
    # pool.map targets must stay at module top level (not nested inside other
    # functions) to avoid the "cannot be pickled" error
    track_idA, comb, misycmb, par, mladata, outdir = args

    if new_xov:
        # skip if this track's xov file already exists, unless new_xov == 2
        # forces recomputation
        if not os.path.isfile(outdir + 'xov/xov_' + track_idA + '_' +
                              misycmb[par][1] + '.pkl') or new_xov == 2:

            # print("Processing " + track_id + " ...")

            # try:
            #    trackA = track_id
            #    trackA = tracklist[str(track_id)]
            trackA = gtrack(vecopts)

            if monthly_sets:
                trackA = trackA.load(outdir + 'gtrack_' + misycmb[par][0][:2] +
                                     '/gtrack_' + track_idA + '.pkl')
            else:
                # trackA = trackA.load(outdir + 'gtrack_' + misycmb[par][0] + '/gtrack_' + track_id + '.pkl')
                trackA.ladata_df = mladata[
                    track_idA]  # faster and less I/O which overloads PGDA

            if trackA is not None and len(trackA.ladata_df) > 0:

                xov_tmp = xov(vecopts)

                # print(comb)

                xovers_list = []
                xover_found = False
                # loop over all combinations containing track_idA (note: the
                # loop variable rebinds track_idA, but s[0] == track_idA for
                # these pairs, so the value is effectively unchanged)
                for track_idA, track_idB in [s for s in comb
                                             if track_idA in s[0]]:

                    if track_idB > track_idA:
                        trackB = gtrack(vecopts)

                        if monthly_sets:
                            trackB = trackB.load(outdir + 'gtrack_' +
                                                 misycmb[par][1][:2] +
                                                 '/gtrack_' + track_idB +
                                                 '.pkl')
                        else:
                            # trackB = trackB.load(outdir + 'gtrack_' + misycmb[par][1] + '/gtrack_' + gtrackB + '.pkl')
                            trackB.ladata_df = mladata[
                                track_idB]  # faster and less I/O which overloads PGDA

                        if trackB is not None and len(trackB.ladata_df) > 0:

                            # # TODO remove when recomputing
                            # trackA.ladata_df[['X_NPstgprj', 'Y_NPstgprj']] = trackA.ladata_df[['X_stgprj', 'Y_stgprj']]
                            # trackB.ladata_df[['X_NPstgprj', 'Y_NPstgprj']] = trackB.ladata_df[['X_stgprj', 'Y_stgprj']]
                            # trackA.ladata_df[] = trackA.ladata_df.rename(index=str, columns={"X_stgprj": "X_NPstgprj", "Y_stgprj": "Y_NPstgprj"})
                            # trackB.ladata_df = trackB.ladata_df.rename(index=str, columns={"X_stgprj": "X_NPstgprj", "Y_stgprj": "Y_NPstgprj"})

                            # looping over all track combinations and updating the general df xov_tmp.xovers
                            xover_found = xov_tmp.setup([trackA, trackB])

                    if new_algo and xover_found:
                        xovers_list.append(xov_tmp.xovtmp)
                    # reset so that a stale result is not re-appended when the
                    # next pair fails the track_idB > track_idA test
                    xover_found = False

                if new_algo:
                    xov_tmp.xovers = pd.DataFrame(xovers_list)
                    xov_tmp.xovers.reset_index(drop=True, inplace=True)
                    xov_tmp.xovers['xOvID'] = xov_tmp.xovers.index

                # for each trackA, write out the xovers found (if any)
                if ([s for s in comb if track_idA in s[0]]
                        and len(xov_tmp.xovers) > 0):
                    # get xover LAT and LON
                    xov_tmp.get_xov_latlon(trackA)

                    # Save to file
                    if not os.path.exists(outdir + 'xov/'):
                        os.mkdir(outdir + 'xov/')
                    if new_algo:
                        # Save to temporary folder
                        # if not os.path.exists(outdir + 'xov/tmp/'):
                        #     os.mkdir(outdir + 'xov/tmp/')
                        # xov_tmp.save(outdir + 'xov/tmp/xov_' + track_idA + '_' + misycmb[par][1] + '.pkl')

                        # just pass rough_xovs to next step
                        return xov_tmp.xovers
                    else:
                        xov_tmp.save(outdir + 'xov/xov_' + track_idA + '_' +
                                     misycmb[par][1] + '.pkl')
                        print('Xov for ' + track_idA +
                              ' processed and written to ' + outdir +
                              'xov/xov_' + track_idA + '_' + misycmb[par][1] +
                              '.pkl @' +
                              time.strftime("%H:%M:%S", time.gmtime()))
                        return track_idA


        else:
            print('Xov for ' + track_idA + ' already exists in ' + outdir +
                  'xov_' + track_idA + '_' + misycmb[par][1] + '.pkl @' +
                  time.strftime("%H:%M:%S", time.gmtime()))
Example #6
def main(args):
    from examples.MLA.options import parallel, outdir, auxdir, local, vecopts

    print(args)

    # read input args
    print('Number of arguments:', len(args), 'arguments.')
    print('Argument List:', str(args))

    cmb_y_in = args[0]  # index of the year/month combination (see misycmb below)
    indir_in = args[1]
    outdir_in = args[2]
    iter_in = args[-1]

    # locate data
    data_pth = basedir  # '/att/nobackup/sberton2/MLA/data/'  # /home/sberton2/Works/NASA/Mercury_tides/data/'
    dataset = indir_in  # 'test/' #'small_test/' #'1301/' #
    data_pth += dataset
    # # load kernels
    # spice.furnsh(auxdir + 'mymeta')  # 'aux/mymeta')

    # set ncores
    ncores = mp.cpu_count() - 1  # 8

    if parallel:
        print('Process launched on ' + str(ncores) + ' CPUs')

    ##############################################

    # Setup some useful options
    # vecopts = {'SCID': '-236',
    #            'SCNAME': 'MESSENGER',
    #            'SCFRAME': -236000,
    #            'INSTID': (-236500, -236501),
    #            'INSTNAME': ('MSGR_MLA', 'MSGR_MLA_RECEIVER'),
    #            'PLANETID': '199',
    #            'PLANETNAME': 'MERCURY',
    #            'PLANETRADIUS': 2440.,
    #            'PLANETFRAME': 'IAU_MERCURY',
    #            'OUTPUTTYPE': 1,
    #            'ALTIM_BORESIGHT': '',
    #            'INERTIALFRAME': 'J2000',
    #            'INERTIALCENTER': 'SSB',
    #            'PARTDER': ''}

    # out = spice.getfov(vecopts['INSTID'][0], 1)
    # updated w.r.t. SPICE from Mike's scicdr2mat.m
    vecopts['ALTIM_BORESIGHT'] = [0.0022105, 0.0029215, 0.9999932892]  # out[2]
    ###########################

    # print(vecopts['ALTIM_BORESIGHT'])

    # apply pointing corrections
    # vecin = {'ZPT':vecopts['ALTIM_BORESIGHT']}

    # setup all combinations between years
    par = int(cmb_y_in)

    if monthly_sets:
        # build monthly 'yymm' tags over the orbital mission (Mar 2011 to
        # Apr 2015) and prepend the Jan/Oct 2008 flyby months
        misy = ['11', '12', '13', '14', '15']
        months = np.arange(1, 13, 1)
        misy = [x + f'{y:02}' for x in misy for y in months]
        misy = ['0801', '0810'] + misy[2:-8]
    else:
        misy = ['08', '11', '12', '13', '14', '15']

    misycmb = list(itert.combinations_with_replacement(misy, 2))
    if debug:
        print("Choose grid element among:",
              dict(map(reversed, enumerate(misycmb))))
    print(par, misycmb[par], " has been selected!")
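    # e.g. with the non-monthly misy above, misycmb holds the 21 ordered pairs
    # ('08', '08'), ('08', '11'), ..., ('15', '15'), and par picks one of them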

    ###########################
    startInit = time.time()

    if iter_in == 0 and compute_input_xov:

        # -------------------------------
        # File reading and ground-tracks computation
        # -------------------------------

        # read all MLA datafiles (*.TAB in data_pth) corresponding to the given years
        # for orbitA and orbitB.
        # Geoloc, if active, will process all files in A+B. Xov will only process combinations
        # of orbits from A and B

        if monthly_sets:
            allFilesA = glob.glob(
                os.path.join(
                    outdir, indir_in + misycmb[par][0][:2] + '/gtrack_' +
                    misycmb[par][0] + '*'))
            allFilesB = glob.glob(
                os.path.join(
                    outdir, indir_in + misycmb[par][1][:2] + '/gtrack_' +
                    misycmb[par][1] + '*'))
        else:
            allFilesA = glob.glob(
                os.path.join(outdir, indir_in + misycmb[par][0] + '/*'))
            allFilesB = glob.glob(
                os.path.join(outdir, indir_in + misycmb[par][1] + '/*'))

        # Compute all combinations among available orbits, where first orbit is in allFilesA and second orbit in allFilesB (exclude same tracks cmb)
        # comb=np.array(list(itert.combinations([fil.split('.')[0][-10:] for fil in allFiles], 2))) # this computes comb btw ALL files
        comb = list(
            itert.product([fil.split('.')[0][-10:] for fil in allFilesA],
                          [fil.split('.')[0][-10:] for fil in allFilesB]))
        comb = np.array([c for c in comb if c[0] != c[1]])
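        # comb now holds ordered (trackA, trackB) pairs of 10-character ids
        # (presumably yymmddhhmm), with same-track pairs excluded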

        # if iter > 0, don't test all combinations, only those resulting in
        # xovers at the previous iter
        # TODO: check whether one could safely save time by only considering
        # xovers with a given weight
        prev_iter = int(outdir_in.split('/')[1].split('_')[-1])
        if prev_iter > 0:
            comb = select_useful_comb(comb, prev_iter, outdir_in)

        # read all ladata needed for these combinations
        track_obj = gtrack(vecopts)
        mladata = {}
        cols = [
            'ET_TX', 'TOF', 'orbID', 'seqid', 'ET_BC', 'offnadir', 'LON',
            'LAT', 'R', 'X_stgprj', 'Y_stgprj'
        ]
        for track_id in set(np.ravel(comb)):
            track_obj = track_obj.load(outdir + outdir_in + 'gtrack_' +
                                       track_id[:2] + '/gtrack_' + track_id +
                                       '.pkl')
            mladata[track_id] = track_obj.ladata_df.loc[:, cols]

        endInit = time.time()
        print('----- Runtime Init= ' + str(endInit - startInit) +
              ' sec -----' + str((endInit - startInit) / 60.) + ' min -----')

        # -------------------------------
        # Xovers setup
        # -------------------------------

        startXov2 = time.time()

        args = ((fil.split('.')[0][-10:], comb, misycmb, par, mladata,
                 outdir + outdir_in) for fil in allFilesA)
        print("Looking for (potential) xovers within combinations of",
              len(allFilesA), "tracks (A) with", len(allFilesB),
              "tracks (B)...")

        # loop over all gtracks
        # parallel = 1
        if parallel:
            # close?join?
            if local:
                # a fork-based pool inherits the parent memory; if memory is
                # tight, use a spawn context as in the remote branch below
                from tqdm.contrib.concurrent import process_map  # or thread_map
                result = process_map(launch_xov,
                                     args,
                                     max_workers=ncores,
                                     total=len(allFilesA))
            else:
                pool = mp.get_context("spawn").Pool(
                    processes=ncores)  # mp.cpu_count())
                # store list of tracks with xovs
                result = pool.map(launch_xov, args)  # parallel
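                # release the workers once map() has returned (addresses the
                # "close?join?" note above)
                pool.close()
                pool.join()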


        else:
            result = []  # seq
            if local:
                from tqdm import tqdm
                for arg in tqdm(args, total=len(allFilesA)):
                    result.append(launch_xov(arg))
            else:
                for arg in args:
                    result.append(launch_xov(arg))
            # print(result)

        if len(result) > 0:
            if new_algo:
                rough_xov = pd.concat(result).reset_index()
            else:
                acttracks = np.unique(
                    np.array([x for x in result if x is not None]).flatten())
        else:
            print("### PyXover: no xovers between the available tracks")
            exit()

        endXov2 = time.time()
        print('----- Runtime Xov2 = ' + str(endXov2 - startXov2) +
              ' sec -----' + str((endXov2 - startXov2) / 60.) + ' min -----')

    else:  # xovs will be taken from old iter
        rough_xov = pd.DataFrame()

    # called either with results from xov_rough (iter=0) or with empty df and xovers from old solution
    if new_algo:
        print("Calling a new awesome routine!!")
        xov_prc_iters_run(outdir_in, iter_in, misycmb[par], rough_xov)

    #############################################
    endXov3 = time.time()
    print('----- Runtime Xov rough+fine = ' + str(endXov3 - startInit) +
          ' sec -----' + str((endXov3 - startInit) / 60.) + ' min -----')
Example #7
                np.linalg.norm(np.transpose(MGRx.evalCby(t_spc)) - xv_spc,
                               axis=1),
                label="Cheby",
                color='C2')
        ax.legend()

    fig.savefig('tmp/testInterp.png')  # save the figure to file
    plt.close(fig)  # close the figure

    # worst-case position error between the Chebyshev evaluation and the
    # reference states xv_spc
    return np.max(
        np.linalg.norm(np.transpose(MGRx.evalCby(t_spc)) - xv_spc, axis=1))


if __name__ == '__main__':

    # track_id = '1301052351'
    files = glob.glob(
        '/home/sberton2/Works/NASA/Mercury_tides/out/sim/1301_per2_0/0res_1amp/gtrack_13/gtrack_*.pkl'
    )
    trackA = gtrack(vecopts)
    # load kernels
    spice.furnsh(auxdir + 'mymeta')

    for f in files:
        trackA = trackA.load(f)
        # simple_test()
        max_err = testInterp(trackA.ladata_df, vecopts)  # don't shadow builtin max
        print(f.split('/')[-1], max_err)

    exit()
Example #8
def main(args):

    print(args)

    # read input args
    print('Number of arguments:', len(args), 'arguments.')
    print('Argument List:', str(args))

    epo_in = args[0]
    indir_in = args[1]
    outdir_in = args[2]
    # args[3] is unused here (presumably kept for call-signature compatibility
    # with the other drivers)
    iter_in = args[4]

    # locate data
    data_pth = f'{rawdir}'
    dataset = indir_in
    data_pth += dataset

    # load kernels
    if not local:
        spice.furnsh([f'{auxdir}furnsh.MESSENGER.def', f'{auxdir}mymeta_pgda'])
    else:
        spice.furnsh(f'{auxdir}mymeta')
    # or, add custom kernels
    # load additional kernels
    # spice.furnsh(['XXX.bsp'])

    # set ncores
    ncores = mp.cpu_count() - 1  # 8

    if parallel:
        print('Process launched on ' + str(ncores) + ' CPUs')

    ##############################################
    # updated w.r.t. SPICE from Mike's scicdr2mat.m
    vecopts['ALTIM_BORESIGHT'] = [0.0022105, 0.0029215, 0.9999932892]  # out[2]
    ###########################

    # -------------------------------
    # File reading and ground-tracks computation
    # -------------------------------

    startInit = time.time()

    # read all MLA datafiles (*.TAB in data_pth) for the given epoch
    allFiles = glob.glob(os.path.join(data_pth,
                                      'MLAS??RDR' + epo_in + '*.TAB'))

    endInit = time.time()
    print('----- Runtime Init= ' + str(endInit - startInit) + ' sec -----' +
          str((endInit - startInit) / 60.) + ' min -----')

    startPrepro = time.time()

    # Prepare list of tracks to geolocalise
    tracknames = ['gtrack_' + fil.split('.')[0][-10:] for fil in allFiles]
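    # the trailing 10 characters of each basename are presumably the
    # yymmddhhmm track id, e.g. 'MLASCIRDR1301010759.TAB' -> 'gtrack_1301010759'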

    if new_gtrack:

        # Import solution at previous iteration
        if int(iter_in) > 0:
            tmp = Amat(vecopts)
            # rebuild the Abmat path of the previous iteration by swapping the
            # trailing iteration index in outdir_in
            prev_base = ('_').join(
                (outdir + ('/').join(outdir_in.split('/')[:-2])).split('_')[:-1])
            tmp = tmp.load(prev_base + '_' + str(int(iter_in) - 1) + '/' +
                           outdir_in.split('/')[-2] + '/Abmat_' +
                           ('_').join(outdir_in.split('/')[:-1]) + '.pkl')
            import_prev_sol = hasattr(tmp, 'sol4_pars')
            if import_prev_sol:
                orb_sol, glo_sol, sol_dict = accum_utils.analyze_sol(
                    tmp, tmp.xov)
        tracks = []
        for track_id, infil in zip(tracknames, allFiles):
            track = gtrack(vecopts)
            # read and geolocate the raw datafile (fills track.ladata_df)
            track.prepro(infil)

            if int(iter_in) > 0 and import_prev_sol:
                try:
                    track.pert_cloop_0 = tmp.pert_cloop_0.loc[str(
                        track.name)].to_dict()
                except:
                    if debug:
                        print("No pert_cloop_0 for ", track.name)

                regex = re.compile(track.name + "_dR/d.*")
                soltmp = [('sol_' + x.split('_')[1], v)
                          for x, v in tmp.sol_dict['sol'].items()
                          if regex.match(x)]

                if len(soltmp) > 0:
                    stdtmp = [('std_' + x.split('_')[1], v)
                              for x, v in tmp.sol_dict['std'].items()
                              if regex.match(x)]
                    soltmp = pd.DataFrame(
                        np.vstack([('orb', str(track.name)), soltmp,
                                   stdtmp])).set_index(0).T

                    if debug:
                        print("orbsol prev iter")
                        print(orb_sol.reset_index().orb.values)
                        print(orb_sol.columns)
                        print(str(track.name))
                        print(orb_sol.loc[orb_sol.reset_index().orb.values ==
                                          str(track.name)])

                    track.sol_prev_iter = {'orb': soltmp, 'glo': glo_sol}
                else:
                    track.sol_prev_iter = {'orb': orb_sol, 'glo': glo_sol}
            # if first iter, check if track has been pre-processed by fit2dem and import corrections
            elif int(iter_in) == 0:
                try:
                    gtrack_fit2dem = outdir + outdir_in + '/' + track_id + '.pkl'
                    fit2dem_res = gtrack(vecopts)
                    fit2dem_res = fit2dem_res.load(
                        gtrack_fit2dem).sol_prev_iter
                    print("Solution of fit2dem for file",
                          track_id + ".pkl imported: \n", fit2dem_res['orb'])
                    track.sol_prev_iter = fit2dem_res
                except:
                    # no fit2dem solution available for this track; keep going
                    pass

            tracks.append(track)


        if SpInterp == 3:
            # (misycmb and par are not defined in this driver; report the
            # processed epoch instead)
            print('Orbit and attitude data loaded for epoch 20' + str(epo_in))
            endPrepro = time.time()
            print('----- Runtime Init= ' + str(endPrepro - startPrepro) +
                  ' sec -----' + str((endPrepro - startPrepro) / 60.) +
                  ' min -----')
            exit()

    endPrepro = time.time()
    print('----- Runtime Prepro= ' + str(endPrepro - startPrepro) +
          ' sec -----' + str((endPrepro - startPrepro) / 60.) + ' min -----')

    startGeoloc = time.time()

    args = ((tr, fil, outdir_in) for (tr, fil) in zip(tracks, allFiles))

    # loop over all gtracks
    if parallel:
        # print((mp.cpu_count() - 1))
        if local:
            # a fork-based pool inherits the parent memory; if memory is
            # tight, use a spawn context as in the remote branch
            from tqdm.contrib.concurrent import process_map  # or thread_map
            _ = process_map(launch_gtrack,
                            args,
                            max_workers=ncores,
                            total=len(allFiles))
        else:
            pool = mp.Pool(processes=ncores)  # mp.cpu_count())
            _ = pool.map(launch_gtrack, args)  # parallel
            pool.close()
            pool.join()
    else:
        from tqdm import tqdm
        for arg in tqdm(args, total=len(allFiles)):
            launch_gtrack(arg)  # sequential

    endGeoloc = time.time()
    print('----- Runtime Geoloc= ' + str(endGeoloc - startGeoloc) +
          ' sec -----' + str((endGeoloc - startGeoloc) / 60.) + ' min -----')