Example #1
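All examples below rely on the same aliases. A plausible import block, assuming the GeodeZYX toolbox layout (module paths are assumptions, not verified against a specific release):

import datetime as dt
import itertools
import linecache
import os

import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
import pandas as pd
import scipy.interpolate

from geodezyx import conv, utils   # GeodeZYX toolbox modules (assumed paths)
from geodezyx import files_rw      # used in the usage sketches (assumed path)
# Example #9 additionally needs a SOFA binding exposing iau_c2t06a,
# referenced below as `sofa`, and an `eop_interpotate` helper.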
def ine_block_mono(sat,
                   dt_in,
                   extra_intrvl_strt=.1,
                   extra_intrvl_end=.4,
                   step=300):
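    """
    Build one EPOS INE block (empty values) for a single satellite.

    Helper for write_ine_dummy_file below: only the satellite name,
    the step size and the MJD interval are filled; the orbit fields
    are left at zero.
    """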

    Fields = [
        'orb____1', 'orb____2', 'orb____3', 'orb____4', 'orb____5', 'orb____6',
        'orb___db', 'orb_s2db', 'orb_c2db', 'orb_s4db', 'orb_c4db', 'orb___yb',
        'orb___xb', 'orb_sixb', 'orb_coxb', 'orb___cr'
    ]

    mjd = np.floor(conv.dt2MJD(dt_in))
    mjd_strt = mjd - extra_intrvl_strt
    mjd_end = mjd + extra_intrvl_end + 1

    Lines = []

    l1 = " sat_nr  : " + sat + "\n"
    l2 = " stepsize: {:3}  {:6.2f}\n".format(sat, step)

    Lines.append(l1)
    Lines.append(l2)

    for field in Fields:
        line = " {:}: {:3}  0.000000000000000E+00 {:11.5f} {:11.5f}\n".format(
            field, sat, mjd_strt, mjd_end)
        Lines.append(line)

    Lines.append(" end_sat\n")

    str_out = "".join(Lines)

    return str_out
def write_ine_dummy_file(Sat_list,
                         dt_in,
                         extra_intrvl_strt=.1,
                         extra_intrvl_end=.4,
                         step=300,
                         out_file_path=None):
    """
    Write an EPOS INE dummy (empty values) file
    """

    Lines = []

    mjd = np.floor(conv.dt2MJD(dt_in))
    mjd_strt = mjd - extra_intrvl_strt
    mjd_end = mjd + extra_intrvl_end + 1

    datestr = conv.dt2str(dt.datetime.now(), str_format='%Y/%m/%d %H:%M:%S')

    mjd_strt_deci = mjd_strt - np.floor(mjd_strt)

    head_proto = """%=INE 1.00 {:} NEWSE=INE+ORBCOR                                                                                 
+global
 day_info: 
 epoch   :                            {:5}  {:16.14f}
 interval:                            {:11.5f} {:11.5f}
 stepsize:      {:6.2f}
-global
+initial_orbit
"""
    head = head_proto.format(datestr, int(mjd), 0, mjd_strt, mjd_end, step)

    Lines.append(head)

    for sat in Sat_list:
        Lines.append(
            "******************************************************************\n"
        )
        sat_str = ine_block_mono(sat, dt_in, extra_intrvl_strt,
                                 extra_intrvl_end, step)
        Lines.append(sat_str)
        Lines.append(
            "******************************************************************\n"
        )

    str_end = """-initial_orbit
%ENDINE
"""

    Lines.append(str_end)

    str_out = "".join(Lines)

    if out_file_path:
        with open(out_file_path, "w") as f:
            f.write(str_out)

    return str_out
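A minimal usage sketch (satellite names, date and output path are illustrative):

ine_str = write_ine_dummy_file(["E01", "E02", "E03"],
                               dt.datetime(2019, 1, 1),
                               out_file_path="example.ine")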
Example #3
def write_sp3(SP3_DF_in, outpath, skip_null_epoch=True, force_format_c=False):
    """
    Write DOCSTRING
    
    skip_null_epoch: Do not write an epoch if all sats are null (filtering)

    """
    ################## MAIN DATA
    LinesStk = []

    SP3_DF_wrk = SP3_DF_in.sort_values(["epoch", "sat"])

    EpochRawList = SP3_DF_wrk["epoch"].unique()
    SatList = sorted(SP3_DF_wrk["sat"].unique())
    SatList = list(reversed(SatList))
    SatListSet = set(SatList)
    EpochUsedList = []

    for epoc in EpochRawList:
        SP3epoc = pd.DataFrame(SP3_DF_wrk[SP3_DF_wrk["epoch"] == epoc])
        ## Missing Sat
        MissingSats = SatListSet.difference(set(SP3epoc["sat"]))

        for miss_sat in MissingSats:
            miss_line = SP3epoc.iloc[0].copy()
            miss_line["sat"] = miss_sat
            miss_line["const"] = miss_sat[0]
            miss_line["x"] = 0.000000
            miss_line["y"] = 0.000000
            miss_line["z"] = 0.000000
            miss_line["clk"] = 999999.999999

            ## DataFrame.append was removed in pandas 2.x; use concat instead
            SP3epoc = pd.concat([SP3epoc, miss_line.to_frame().T])

        SP3epoc.sort_values("sat", inplace=True, ascending=False)
        timestamp = conv.dt2sp3_timestamp(conv.numpy_datetime2dt(epoc)) + "\n"

        linefmt = "P{:}{:14.6f}{:14.6f}{:14.6f}{:14.6f}\n"

        LinesStkEpoch = []
        sum_val_epoch = 0
        for ilin, lin in SP3epoc.iterrows():
            line_out = linefmt.format(lin["sat"], lin["x"], lin["y"], lin["z"],
                                      lin["clk"])

            sum_val_epoch += lin["x"] + lin["y"] + lin["z"]

            LinesStkEpoch.append(line_out)

        ### if skip_null_epoch activated, print only if valid epoch
        if not (np.isclose(sum_val_epoch, 0) and skip_null_epoch):
            LinesStk.append(timestamp)  # stack the timestamp
            LinesStk = LinesStk + LinesStkEpoch  # stack the values
            EpochUsedList.append(epoc)  # stack the epoc as dt

    ################## HEADER
    ######### SATELLITE LIST

    Satline_stk = []
    Sigmaline_stk = []

    if force_format_c:
        nlines = 5
    else:
        div, mod = np.divmod(len(SatList), 17)

        if div < 5:
            nlines = 5
        else:
            nlines = div

            if mod != 0:
                nlines += 1

    for i in range(nlines):
        SatLine = SatList[17 * i:17 * (i + 1)]
        SatLineSigma = len(SatLine) * " 01"

        if len(SatLine) < 17:
            complem = " 00" * (17 - len(SatLine))
        else:
            complem = ""

        if i == 0:
            nbsat4line = len(SatList)
        else:
            nbsat4line = ''

        satline = "+  {:3}   ".format(nbsat4line) + "".join(
            SatLine) + complem + "\n"
        sigmaline = "++         0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0\n"
        sigmaline = "++       " + SatLineSigma + complem + "\n"

        Satline_stk.append(satline)
        Sigmaline_stk.append(sigmaline)

    ######### 2 First LINES
    start_dt = conv.numpy_datetime2dt(np.min(EpochUsedList))

    header_line1 = "#cP" + conv.dt2sp3_timestamp(
        start_dt, False) + "     {:3}".format(
            len(EpochUsedList)) + "   u+U IGSXX FIT  XXX\n"

    delta_epoch = int(utils.most_common(np.diff(EpochUsedList) * 10**-9))
    MJD = conv.dt2MJD(start_dt)
    MJD_int = int(np.floor(MJD))
    MJD_dec = MJD - MJD_int
    gps_wwww, gps_sec = conv.dt2gpstime(start_dt, False, "gps")

    header_line2 = "## {:4} {:15.8f} {:14.8f} {:5} {:15.13f}\n".format(
        gps_wwww, gps_sec, delta_epoch, MJD_int, MJD_dec)

    ######### HEADER BOTTOM
    header_bottom = """%c M  cc GPS ccc cccc cccc cccc cccc ccccc ccccc ccccc ccccc
%c cc cc ccc ccc cccc cccc cccc cccc ccccc ccccc ccccc ccccc
%f  1.2500000  1.025000000  0.00000000000  0.000000000000000
%f  0.0000000  0.000000000  0.00000000000  0.000000000000000
%i    0    0    0    0      0      0      0      0         0
%i    0    0    0    0      0      0      0      0         0
/* PCV:IGSXX_XXXX OL/AL:FESXXXX  NONE     YN CLK:CoN ORB:CoN
/*     GeodeZYX Toolbox Output
/*
/*
"""

    ################## FINAL STACK

    FinalLinesStk = []

    FinalLinesStk.append(header_line1)
    FinalLinesStk.append(header_line2)
    FinalLinesStk = FinalLinesStk + Satline_stk + Sigmaline_stk
    FinalLinesStk.append(header_bottom)
    FinalLinesStk = FinalLinesStk + LinesStk + ["EOF"]

    FinalStr = "".join(FinalLinesStk)

    with open(outpath, "w+") as F:
        F.write(FinalStr)
def ESMGFZ_extrapolator(path_or_netcdf_object_in,
                        time_xtrp,
                        lat_xtrp,
                        lon_xtrp,
                        wished_values=("duV","duNS","duEW"),
                        output_type = "DataFrame",
                        debug=False,verbose=True,
                        time_smart=True,
                        interp_method="splinef2d"):
    """
    Extrapolate loading values from the ESMGFZ models
    (esmdata.gfz-potsdam.de:8080/).

    Parameters
    ----------
    path_or_netcdf_object_in : string, list of strings or NetCDF object
        Input: can be a file path (string), a list of strings
        (will be concatenated), or directly the NetCDF object (faster).
    time_xtrp : float or float iterable
        time for the wished extrapolated values.
        For daily files: hours of day [0..23].
        For yearly files: day of year [0..364].
    lat_xtrp : float or float iterable
        latitude component for the wished extrapolated values,
        ranging from [-90..90].
    lon_xtrp : float or float iterable
        longitude component for the wished extrapolated values,
        ranging from [-180..180].
    wished_values : tuple of string, optional
        the components of the extrapolated values. 
        The default is ("duV","duNS","duEW").
    output_type : str, optional
        Choose the output type.
        "DataFrame","dict","array","tuple","list"
        The default is "DataFrame".
    debug : bool, optional
        if True, return the raw NetCDF object for debug purposes.
    verbose : bool, optional
        print progress information. The default is True.
    time_smart : bool, optional
        convert the NetCDF time axis to MJD (and add a time_dt column
        to the output DataFrame). The default is True.
    interp_method : str, optional
        interpolation method passed to scipy.interpolate.interpn.
        The default is "splinef2d".

    Returns
    -------
    Points_out : see output_type
        The extrapolated values.

    """
    
    if not utils.is_iterable(time_xtrp):
        time_xtrp = [time_xtrp]
    if not utils.is_iterable(lat_xtrp):
        lat_xtrp = [lat_xtrp]
    if not utils.is_iterable(lon_xtrp):
        lon_xtrp = [lon_xtrp]
    
    Points_xtrp = (time_xtrp,lat_xtrp,lon_xtrp)

    if type(path_or_netcdf_object_in) is str:
        NC =  nc.Dataset(path_or_netcdf_object_in)
    elif type(path_or_netcdf_object_in) in (nc.Dataset,nc.MFDataset):
        NC = path_or_netcdf_object_in
    else:
        NC = nc.MFDataset(sorted(path_or_netcdf_object_in))
        
    if debug:
        return NC
    
    time_orig = np.array(NC['time'][:])
    
    if time_smart:
        # we work in MJD
        start_date = conv.dt2MJD(conv.str_date2dt(NC['time'].units[11:]))
        if len(time_orig) <= 366:
            time = start_date + time_orig
        else:
            time = start_date + np.array(range(len(time_orig)))
    else:
        time = time_orig  # keep the raw NetCDF time axis
        
    lat  = np.flip(np.array(NC['lat'][:]))  ### flip lat so the axis is ascending (required by interpn)
    lon  = np.array(NC['lon'][:])
    
    Points = (time,lat,lon)    
    
    WishVals_Stk = []
    WishVals_dic = dict()
    
    #### prepare dedicated time, lat, lon columns
    Points_xtrp_array = np.array(list(itertools.product(*Points_xtrp)))
    WishVals_dic['time'] = Points_xtrp_array[:,0]
    WishVals_dic['lat']  = Points_xtrp_array[:,1]
    WishVals_dic['lon']  = Points_xtrp_array[:,2]
    
    
    ### do the interpolation for the wished value
    for wishval in wished_values:
        if verbose:
            print("INFO:",wishval,"start interpolation at",dt.datetime.now())
        
        #### Val = np.array(NC[wishval]) ### Slow
        Val = NC[wishval][:]
        
        if verbose:
            print("INFO:",wishval,"grid loaded at",dt.datetime.now())
            
        Val = np.flip(Val,1) ### flip along lat to match the ascending lat axis
        Val_xtrp = scipy.interpolate.interpn(Points,Val,
                                             Points_xtrp,
                                             method=interp_method)
        WishVals_Stk.append(Val_xtrp)
        WishVals_dic[wishval] = Val_xtrp
    
    #### choose the output
    if output_type == "DataFrame":
        Points_out = pd.DataFrame(WishVals_dic)
        if time_smart:
            Points_out['time_dt'] = conv.MJD2dt(Points_out['time'])
    elif output_type == "dict":
        Points_out = WishVals_dic
    elif output_type == "array":
        Points_out = np.column_stack(WishVals_Stk)
    elif output_type == "tuple":
        Points_out = tuple(WishVals_Stk)
    elif output_type == "list":
        Points_out = list(WishVals_Stk)
    else:
        Points_out = WishVals_Stk
        
    return Points_out
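A minimal usage sketch (the NetCDF file name and coordinates are illustrative):

NTAL = ESMGFZ_extrapolator("NTAL_2020.nc",            # hypothetical ESMGFZ grid file
                           time_xtrp=12.0,            # hour of day (daily file)
                           lat_xtrp=48.9,
                           lon_xtrp=2.35,
                           wished_values=("duV",))
print(NTAL[["time", "lat", "lon", "duV"]])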
    
def timeline_plotter(datadico,start = dt.datetime(1980,1,1) ,
                     end = dt.datetime(2099,1,1),dots_plot=False,
                     jul_date_plot=False,datadico_anex_list = [],
                     use_only_stats_of_main_datadico=False,
                     colordico_for_main_datadico=None):
    """
    A simpler version has been committed to the geodezyx toolbox archive
    on 20180118 15:59A
    """

    fig , ax = plt.subplots()
    ax.xaxis.grid(True)
    ax.yaxis.grid(True)

    if not use_only_stats_of_main_datadico:
        stats_concat_list = list(datadico.keys()) + sum([list(e.keys()) for e in datadico_anex_list], [])
        stats_concat_list = list(reversed(sorted(list(set(stats_concat_list)))))
    else:
        stats_concat_list = list(reversed(sorted(list(datadico.keys()))))

    # the plot does not behave the same way in the morning
    # (RINEX timelines won't be plotted if it is the morning)
    if dt.datetime.now().hour < 12:
        morning_shift = dt.timedelta(days=1)
    else:
        morning_shift = dt.timedelta(days=0)

    legend_list = [] # must be here before the loop
    for i,stat in enumerate(stats_concat_list):
        # PART 1 : PLOT MAIN DATADICO
        if stat not in datadico:
            continue

        #T = Time, O = archive name (cf. colordico_for_main_datadico)
        Torig = [ e[-1] for e in datadico[stat] ]
        Oorig = [ e[1]  for e in datadico[stat] ]

        # Time windowing
        T , O = [] , []
        for t , o in zip(Torig , Oorig):
            if ( start - morning_shift ) <= t <= end: 
                T.append(t)
                O.append(o)

        T,O = utils.sort_binom_list(T,O)

        TMJD=conv.dt2MJD(T)
        TGrpAll = utils.consecutive_groupIt(TMJD,True) # Tuples (start,end) of continuous periods

        for tgrp in TGrpAll:
            color1 = 'C0'
            color2 = ''
            extra_archive = False

            ###*** managing colors
            if colordico_for_main_datadico:
                igrpstart    = TMJD.index(tgrp[0])
                igrpend      = TMJD.index(tgrp[1])
                Ogrp         = O[igrpstart:igrpend+1]
                Tgrp         = TMJD[igrpstart:igrpend+1] # all dates in the current continuous period

                opt_set      = list(set(Ogrp))

                if len(opt_set) == 1: # Regular case : only one archive
                    if opt_set[0] in colordico_for_main_datadico:
                        color1 = colordico_for_main_datadico[opt_set[0]]
                else: # several archives ... so the line has to be split into segments
                    extra_archive = True
                    OSubgrp = utils.identical_groupIt(Ogrp)
                    TSubgrp = utils.sublistsIt(Tgrp , [len(e) for e in OSubgrp])

                    Tgrp_plt      = [ (e[0] , e[-1] + 1)    for e in TSubgrp ] # +1 because the end boundary day is not included
                    Ogrp_plt      = [ list(set(e))[0] for e in OSubgrp     ]
                    Color_grp_plt = [ colordico_for_main_datadico[e] if e in colordico_for_main_datadico else color2 for e in Ogrp_plt ]
            ###*** End of managing colors

            tgrp = (tgrp[0] , tgrp[1] + 1) # +1 because the end boundary day is not included
                                           # must stay there, in case of not colordico_for_main_datadico

            if not jul_date_plot:
                tgrp = conv.MJD2dt(tgrp)
                if extra_archive:
                    Tgrp_plt = [ (conv.MJD2dt(e[0]) , conv.MJD2dt(e[1])) for e in Tgrp_plt ]

            #PLOT part
            if tgrp[1] == tgrp[0] + dt.timedelta(days=1): # CASE NO PERIOD, ONLY ONE DAY
                ax.plot(tgrp[0],i , '.' + color1)
            else:                  # CASE PERIOD
                if not extra_archive: # ONE ARCHIVE REGULAR CASE
                    ax.plot(tgrp,[i]*2, '-' + color1)
                else:              # SUBCASE "EXTRA" : SEVERAL ARCHIVE
                    for tt , cc in zip(Tgrp_plt , Color_grp_plt):
                        ax.plot(tt,[i]*2 , '-' + cc)

        # PART 2 : PLOT ANEX DATADICO
        for idatadico_anex,datadico_anex in enumerate(datadico_anex_list):
            if stat in list(datadico_anex.keys()):
                T = datadico_anex[stat]
                T = [ t for t in T if start <= t <= end  ]
                T = sorted(T)

                if jul_date_plot:
                    T = conv.dt2MJD(T) # T are datetimes here; convert for julian-date plotting

                pale_blue_dot , = ax.plot(T,i*np.ones(len(T)), 'o', color='skyblue',label="final SNX")
                legend_list     = [pale_blue_dot]

    #### LEGEND
    if colordico_for_main_datadico:
        import matplotlib.lines as mlines
        for arcnam , col in colordico_for_main_datadico.items():
            legend_list.append(mlines.Line2D([], [], color=col,
                                             label=arcnam))
        plt.legend(handles=legend_list,loc='upper left',ncol=3,
                   columnspacing=1) # build the legend once, after the loop


#    ax.set_yticks(np.arange(0,len(plotstat_lis)-1),plotstat_lis)
    plt.yticks(np.arange(0,len(stats_concat_list)),stats_concat_list)
    fig.autofmt_xdate()
    koef = np.sqrt(2)
    fig.set_size_inches(koef*11.69,koef*len(stats_concat_list) * 0.28) #16.53
    ax.set_ylim([-1 , len(stats_concat_list) + 1])

    return fig
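A hedged usage sketch; the datadico structure below is inferred from the loops above (a dict mapping a station name to tuples whose element [1] is the archive name and whose last element is a datetime):

datadico = {"POTS": [("rnx", "IGS", dt.datetime(2019, 1, 1)),
                     ("rnx", "IGS", dt.datetime(2019, 1, 2))]}
fig = timeline_plotter(datadico,
                       start=dt.datetime(2018, 12, 1),
                       end=dt.datetime(2019, 2, 1),
                       colordico_for_main_datadico={"IGS": "C1"})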
def read_erp(file_path_in, ac=None):
    """
    
    Read IGS Analysis Center ERP files

    Parameters
    ----------
    file_path_in :  str
        Path of the file in the local machine.

    ac :  str
        The analysis center that will be used.
        If not specified, it is taken from the first 3 letters of the file name.


    Returns
    -------
    Erp_end :  pandas DataFrame
        DataFrame with the data extracted from the file.
        

    Note
    ----
    Column names:
    
    ('MJD','X-P (arcsec)', 'Y-P (arcsec)', 'UT1UTC (E-7S)','LOD (E-7S/D)','S-X (E-6" arcsec)','S-Y (E-6" arcsec)',
    'S-UT (E-7S)','S-LD (E-7S/D)','NR (E-6" arcsec)', 'NF (E-6" arcsec)', 'NT (E-6" arcsec)',
    'X-RT (arcsec/D)','Y-RT (arcsec/D)','S-XR (E-6" arcsec/D)','S-YR (E-6" arcsec/D)', 'C-XY', 'C-XT',
    'C-YT', 'DPSI', 'DEPS','S-DP','S-DE')

    """

    caminho_arq = file_path_in  # file path (the Portuguese variable names below are kept as-is)

    #### FIND DELIVERY DATE
    name = os.path.basename(caminho_arq)

    if not ac:
        ac = name[:3]

    if len(name) == 12:
        dt_delivery = conv.sp3name2dt(caminho_arq)
    elif len(name) == 38:
        dt_delivery = conv.sp3name_v3_2dt(caminho_arq)
    else:
        dt_delivery = conv.posix2dt(0)

    le = open(caminho_arq, 'r')
    letudo = le.readlines()
    le.close()
    tamanho = len(letudo)  # number of lines in the file

    #para = tamanho  # where the file stops being read

    numeros = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    #le = 0
    #numlin = 0  # number of lines of the epoch matrix
    #numcol = 16  # number of columns the final matrix must have

    ERP = []

    if caminho_arq[-3:] in ('snx', 'ssc'):
        file = open(caminho_arq)
        Lines = file.readlines()
        XPO_stk = []
        XPO_std_stk = []
        YPO_stk = []
        YPO_std_stk = []
        LOD_stk = []
        LOD_std_stk = []
        MJD_stk = []
        marker = False

        for i in range(len(Lines)):

            if len(Lines[i].strip()) == 0:
                continue
            else:

                if Lines[i].split()[0] == '+SOLUTION/ESTIMATE':
                    marker = True

                if Lines[i].split()[0] == '-SOLUTION/ESTIMATE':
                    marker = False

                if utils.contains_word(Lines[i], 'XPO') and marker:
                    # Doy = (Lines[i][30:33])
                    # Year = (Lines[i][27:29])
                    # Pref_year = '20'
                    # Year = int(Pref_year+Year)
                    # Date = conv.doy2dt(Year,Doy)
                    Date = conv.datestr_sinex_2_dt(Lines[i].split()[5])
                    XPO = float(Lines[i][47:68]) * (10**-3)
                    XPO_std = float(Lines[i][69:80]) * (10**-3)
                    XPO_stk.append(XPO)
                    XPO_std_stk.append(XPO_std)
                    MJD_stk.append(conv.dt2MJD(Date))
                    #MJD_stk.append(cmg.jd_to_mjd(cmg.date_to_jd(Date.year,Date.month,Date.day)))

                if utils.contains_word(Lines[i], 'YPO') and marker:
                    Date = conv.datestr_sinex_2_dt(Lines[i].split()[5])
                    YPO = float(Lines[i][47:68]) * (10**-3)
                    YPO_std = float(Lines[i][69:80]) * (10**-3)
                    YPO_stk.append(YPO)
                    YPO_std_stk.append(YPO_std)
                    MJD_stk.append(conv.dt2MJD(Date))
                    #MJD_stk.append(cmg.jd_to_mjd(cmg.date_to_jd(Date.year,Date.month,Date.day)))

                if utils.contains_word(Lines[i], 'LOD') and marker:
                    Date = conv.datestr_sinex_2_dt(Lines[i].split()[5])
                    LOD = float(Lines[i][47:68]) * (10**+4)
                    LOD_std = float(Lines[i][69:80]) * (10**+4)
                    LOD_stk.append(LOD)
                    LOD_std_stk.append(LOD_std)
                    #MJD_stk.append(cmg.jd_to_mjd(cmg.date_to_jd(Date.year,Date.month,Date.day)))
                    MJD_stk.append(conv.dt2MJD(Date))

        MJD = list(sorted(set(MJD_stk)))
        if len(LOD_stk) == 0:
            LOD_stk = ['0'] * len(MJD)
            LOD_std_stk = ['0'] * len(MJD)

        for i in range(len(MJD)):

            ERP_data = [
                ac, MJD[i], XPO_stk[i], YPO_stk[i], 0, LOD_stk[i],
                XPO_std_stk[i], YPO_std_stk[i], 0, LOD_std_stk[i], 0, 0, 0, 0,
                0, 0, 0, dt_delivery
            ]

            ERP.append(ERP_data)

    if ac in ('COD', 'cod', 'com', 'cof', 'grg', 'mit', 'sio', 'igs', 'igr'):
        for i in range(1, tamanho + 1):  # linecache line numbers start at 1
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual[0:1] in numeros:
                ERP_data = linhaatual.split()
                for j in range(len(ERP_data)):
                    ERP_data[j] = float(ERP_data[j])
                ERP_data.insert(0, ac)
                ERP_data[2] = ERP_data[2] * (10**-6)
                ERP_data[3] = ERP_data[3] * (10**-6)
                ERP_data[13] = ERP_data[13] * (10**-6)
                ERP_data[14] = ERP_data[14] * (10**-6)
                del ERP_data[17:]
                ERP_data.append(dt_delivery)

                ERP.append(ERP_data)

        linecache.clearcache()


    if ac in ('wum', 'grg', 'esa', 'mit', 'ngs', 'sio'):
        for i in range(1, tamanho + 1):  # linecache line numbers start at 1
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual[0:1] in numeros:
                ERP_data = linhaatual.split()
                for j in range(len(ERP_data)):
                    ERP_data[j] = float(ERP_data[j])
                ERP_data.insert(0, ac)
                ERP_data[2] = ERP_data[2] * (10**-6)
                ERP_data[3] = ERP_data[3] * (10**-6)
                ERP_data[13] = ERP_data[13] * (10**-6)
                ERP_data[14] = ERP_data[14] * (10**-6)
                ERP_data.append(dt_delivery)

                ERP.append(ERP_data)

        linecache.clearcache()

    if ac in ('gbm', 'gfz', 'gfr', "p1_", "p1r"):
        for i in range(1, tamanho + 1):  # linecache line numbers start at 1
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual[0:1] in numeros:
                ERP_data = linhaatual.split()
                for j in range(len(ERP_data)):
                    ERP_data[j] = float(ERP_data[j])
                ERP_data.insert(0, ac)
                ERP_data[2] = ERP_data[2] * (10**-6)
                ERP_data[3] = ERP_data[3] * (10**-6)
                ERP_data[13] = ERP_data[13] * (10**-6)
                ERP_data[14] = ERP_data[14] * (10**-6)
                ERP_data.append(dt_delivery)

                ERP.append(ERP_data)
        linecache.clearcache()


    header = []
    if ac in ('emr'):
        for i in range(1, tamanho + 1):  # linecache line numbers start at 1
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual.startswith('EOP  SOLUTION'):  # getline keeps the trailing newline
                ERP_data = []
                header = ['EOP  SOLUTION']
            if linhaatual[0:1] in numeros and 'EOP  SOLUTION' in header:
                ERP_data = linhaatual.split()
                for j in range(len(ERP_data)):
                    ERP_data[j] = float(ERP_data[j])
                ERP_data.insert(0, ac)
                ERP_data[2] = ERP_data[2] * (10**-6)
                ERP_data[3] = ERP_data[3] * (10**-6)
                ERP_data[13] = ERP_data[13] * (10**-6)
                ERP_data[14] = ERP_data[14] * (10**-6)
                del ERP_data[17:]
                ERP_data.append(dt_delivery)

                ERP.append(ERP_data)
        linecache.clearcache()

    Erp_end = pd.DataFrame(ERP,
                           columns=[
                               'AC', 'MJD', 'X-P', 'Y-P', 'UT1UTC(UT1 -TAI)',
                               'LOD', 'S-X', 'S-Y', 'S-UT', 'S-LD', 'NR', 'NF',
                               'NT', 'X-RT', 'Y-RT', 'S-XR', 'S-YR',
                               'Delivered_date'
                           ])

    return Erp_end
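A minimal usage sketch (the file name is illustrative):

erp_df = read_erp("cod20000.erp", ac="cod")  # hypothetical 12-char CODE ERP file name
print(erp_df[["AC", "MJD", "X-P", "Y-P"]].head())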
Example #7
def vmf1(ah, aw, dt, dlat, zd):
    """
    This subroutine determines the VMF1 (Vienna Mapping Functions 1) for specific sites.

    Parameters
    ----------
    ah:
        hydrostatic coefficient a
    aw:
        wet coefficient a
    dt:
        datetime in python datetime
    dlat:
        ellipsoidal latitude in radians
    zd:
        zenith distance in radians

    Returns
    ----------
    vmf1h:
        hydrostatic mapping function
    vmf1w:
        wet mapping function

    References
    ----------
    Boehm, J., B. Werl, H. Schuh (2006), Troposphere mapping functions for GPS and very long baseline interferometry
    from European Centre for Medium-Range Weather Forecasts operational analysis data,
    J. Geoph. Res., Vol. 111, B02406, doi:10.1029/2005JB003629.

    Notes
    ----------
    Written by Johannes Boehm, 2005 October 2

    Translated to python by Chaiyaporn Kitpracha
    """

    pi = 3.14159265359
    dmjd = conv.dt2MJD(dt)
    doy = dmjd - 44239.0 + 1 - 28  # days since 1980-01-01 (MJD 44239), minus the 28-day phase offset of the original VMF1 code

    bh = 0.0029
    c0h = 0.062

    if dlat < 0:
        phh = pi
        c11h = 0.007
        c10h = 0.002
    else:
        phh = 0
        c11h = 0.005
        c10h = 0.001

    ch = c0h + ((np.cos(doy/365.25*2*pi + phh)+1)*c11h/2 \
                + c10h)*(1-np.cos(dlat))

    sine = np.sin(pi / 2 - zd)
    beta = bh / (sine + ch)
    gamma = ah / (sine + beta)
    topcon = (1 + ah / (1 + bh / (1 + ch)))
    vmf1h = topcon / (sine + gamma)

    bw = 0.00146
    cw = 0.04391
    beta = bw / (sine + cw)
    gamma = aw / (sine + beta)
    topcon = (1 + aw / (1 + bw / (1 + cw)))
    vmf1w = topcon / (sine + gamma)

    return vmf1h, vmf1w
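A minimal usage sketch (the a coefficients are illustrative; they normally come from gridded VMF1 products):

mf_h, mf_w = vmf1(ah=0.00127,                  # hypothetical hydrostatic coefficient
                  aw=0.00058,                  # hypothetical wet coefficient
                  dt=dt.datetime(2019, 1, 1, 12),
                  dlat=np.deg2rad(48.0),       # ellipsoidal latitude in radians
                  zd=np.deg2rad(70.0))         # zenith distance in radians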
def write_sp3(SP3_DF_in,
              outpath,
              outname=None,
              prefix='orb',
              skip_null_epoch=True,
              force_format_c=False):
    """
    Write a SP3 file from an Orbit DataFrame

    Parameters
    ----------
    SP3_DF_in : DataFrame
        Input Orbit DataFrame.
    outpath : str
        The output path of the file (see also outname).
    outname : None or str, optional
        None = outpath is the full path (directory + filename) of the output.
        A string = a manual name for the file.
        'auto_old_cnv' = automatically generate the filename (old convention)
        'auto_new_cnv' = automatically generate the filename (new convention)
        The default is None.
    prefix : str, optional
        the output 3-char. name of the AC. The default is 'orb'.
    skip_null_epoch : bool, optional
        Do not write an epoch if all sats are null (filtering). 
        The default is True.
    force_format_c : bool, optional
        Force exactly 5 satellite-list lines in the header,
        as in the SP3-c format. The default is False.

    Returns
    -------
    The string containing the formatted SP3 data.
    """

    ################## MAIN DATA
    LinesStk = []

    SP3_DF_wrk = SP3_DF_in.sort_values(["epoch", "sat"])

    EpochRawList = SP3_DF_wrk["epoch"].unique()
    SatList = sorted(SP3_DF_wrk["sat"].unique())
    SatList = list(reversed(SatList))
    SatListSet = set(SatList)
    EpochUsedList = []

    if not "clk" in SP3_DF_wrk.columns:
        SP3_DF_wrk["clk"] = 999999.999999

    for epoc in EpochRawList:
        SP3epoc = pd.DataFrame(SP3_DF_wrk[SP3_DF_wrk["epoch"] == epoc])
        ## manage missing Sats for the current epoc
        MissingSats = SatListSet.difference(set(SP3epoc["sat"]))

        for miss_sat in MissingSats:
            miss_line = SP3epoc.iloc[0].copy()
            miss_line["sat"] = miss_sat
            miss_line["const"] = miss_sat[0]
            miss_line["x"] = 0.000000
            miss_line["y"] = 0.000000
            miss_line["z"] = 0.000000
            miss_line["clk"] = 999999.999999

            ## DataFrame.append was removed in pandas 2.x; use concat instead
            SP3epoc = pd.concat([SP3epoc, miss_line.to_frame().T])
        #### end of missing sat bloc

        SP3epoc.sort_values("sat", inplace=True, ascending=False)
        timestamp = conv.dt2sp3_timestamp(conv.numpy_dt2dt(epoc)) + "\n"

        linefmt = "P{:}{:14.6f}{:14.6f}{:14.6f}{:14.6f}\n"

        LinesStkEpoch = []
        sum_val_epoch = 0
        for ilin, lin in SP3epoc.iterrows():
            if not "clk" in lin.index:  # manage case if no clk in columns
                lin["clk"] = 999999.999999
            line_out = linefmt.format(lin["sat"], lin["x"], lin["y"], lin["z"],
                                      lin["clk"])

            sum_val_epoch += lin["x"] + lin["y"] + lin["z"]

            LinesStkEpoch.append(line_out)

        ### if skip_null_epoch activated, print only if valid epoch
        if not (np.isclose(sum_val_epoch, 0) and skip_null_epoch):
            LinesStk.append(timestamp)  # stack the timestamp
            LinesStk = LinesStk + LinesStkEpoch  # stack the values
            EpochUsedList.append(epoc)  # stack the epoc as dt

    ################## HEADER
    ######### SATELLITE LIST

    Satline_stk = []
    Sigmaline_stk = []

    if force_format_c:
        nlines = 5
    else:
        div, mod = np.divmod(len(SatList), 17)

        if div < 5:
            nlines = 5
        else:
            nlines = div

            if mod != 0:
                nlines += 1

    for i in range(nlines):
        SatLine = SatList[17 * i:17 * (i + 1)]
        SatLineSigma = len(SatLine) * " 01"

        if len(SatLine) < 17:
            complem = " 00" * (17 - len(SatLine))
        else:
            complem = ""

        if i == 0:
            nbsat4line = len(SatList)
        else:
            nbsat4line = ''

        satline = "+  {:3}   ".format(nbsat4line) + "".join(
            SatLine) + complem + "\n"
        sigmaline = "++         0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0  0\n"
        sigmaline = "++       " + SatLineSigma + complem + "\n"

        Satline_stk.append(satline)
        Sigmaline_stk.append(sigmaline)

    ######### 2 First LINES
    start_dt = conv.numpy_dt2dt(np.min(EpochUsedList))

    header_line1 = "#cP" + conv.dt2sp3_timestamp(
        start_dt, False) + "     {:3}".format(
            len(EpochUsedList)) + "   u+U IGSXX FIT  XXX\n"

    delta_epoch = int(utils.most_common(np.diff(EpochUsedList) * 10**-9))
    MJD = conv.dt2MJD(start_dt)
    MJD_int = int(np.floor(MJD))
    MJD_dec = MJD - MJD_int
    gps_wwww, gps_sec = conv.dt2gpstime(start_dt, False, "gps")

    header_line2 = "## {:4} {:15.8f} {:14.8f} {:5} {:15.13f}\n".format(
        gps_wwww, gps_sec, delta_epoch, MJD_int, MJD_dec)

    ######### HEADER BOTTOM
    header_bottom = """%c M  cc GPS ccc cccc cccc cccc cccc ccccc ccccc ccccc ccccc
%c cc cc ccc ccc cccc cccc cccc cccc ccccc ccccc ccccc ccccc
%f  1.2500000  1.025000000  0.00000000000  0.000000000000000
%f  0.0000000  0.000000000  0.00000000000  0.000000000000000
%i    0    0    0    0      0      0      0      0         0
%i    0    0    0    0      0      0      0      0         0
/* PCV:IGSXX_XXXX OL/AL:FESXXXX  NONE     YN CLK:CoN ORB:CoN
/*     GeodeZYX Toolbox Output
/*
/*
"""

    ################## FINAL STACK

    FinalLinesStk = []

    FinalLinesStk.append(header_line1)
    FinalLinesStk.append(header_line2)
    FinalLinesStk = FinalLinesStk + Satline_stk + Sigmaline_stk
    FinalLinesStk.append(header_bottom)
    FinalLinesStk = FinalLinesStk + LinesStk + ["EOF"]

    FinalStr = "".join(FinalLinesStk)

    ### Manage the file path
    prefix_opera = prefix

    if not outname:
        outpath_opera = outpath
    elif outname == 'auto_old_cnv':
        week, dow = conv.dt2gpstime(start_dt)
        filename = prefix_opera + str(week) + str(dow) + '.sp3'
        outpath_opera = os.path.join(outpath, filename)
    elif outname == 'auto_new_cnv':
        raise NotImplementedError("auto_new_cnv output naming is not implemented yet")
    else:
        # outname is a manual file name (cf. the docstring)
        outpath_opera = os.path.join(outpath, outname)

    with open(outpath_opera, "w+") as F:
        F.write(FinalStr)

    return FinalStr
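A minimal usage sketch (orb_df stands for a hypothetical Orbit DataFrame with epoch, sat, const, x, y, z columns, as the function expects):

sp3_str = write_sp3(orb_df,
                    outpath="/tmp/orbits",
                    outname='auto_old_cnv',
                    prefix='gfz')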
Example #9
def OrbDF_crf2trf(DForb_inp,
                  DF_EOP_inp,
                  time_scale_inp="gps",
                  inv_trf2crf=False):
    """
    Convert an Orbit DataFrame from the Celestial Reference Frame to
    the Terrestrial Reference Frame.
    
    Requires EOP to work. Cf. note below.

    Parameters
    ----------
    DForb_inp : DataFrame
        Input Orbit DataFrame in the Celestial Reference Frame.
    DF_EOP_inp : DataFrame
        EOP DataFrame  (C04 format).
    time_scale_inp : str, optional
        The time scale of the input epochs. Handles 'utc', 'tai' and 'gps'.
        The default is "gps".
    inv_trf2crf : bool, optional
        Provide the inverse transformation TRF => CRF.
        The default is False.

    Returns
    -------
    DForb_out : DataFrame
        Output Orbit DataFrame in the Terrestrial Reference Frame
        (or Celestial if inv_trf2crf is True).
        
    Note
    ----
    The EOP can be obtained from the IERS C04 products.
    e.g.
    https://datacenter.iers.org/data/latestVersion/224_EOP_C04_14.62-NOW.IAU2000A224.txt
    To get them as a compatible DataFrame, use the function
    files_rw.read_eop_C04()
    """

    DForb = DForb_inp.copy()

    ### bring everything to UTC
    if time_scale_inp.lower() == "gps":
        DForb["epoch_utc"] = conv.dt_gpstime2dt_utc(DForb["epoch"])
    elif time_scale_inp.lower() == "tai":
        DForb["epoch_utc"] = conv.dt_tai2dt_utc(DForb["epoch"])
    elif time_scale_inp.lower() == "utc":
        DForb["epoch_utc"] = DForb["epoch"]
    ### TT and UT1 are not implemented (quite unlikely to have them as input)

    ### do the time scale's conversion
    DForb["epoch_tai"] = conv.dt_utc2dt_tai(DForb["epoch_utc"])
    DForb["epoch_tt"] = conv.dt_tai2dt_tt(DForb["epoch_tai"])
    DForb["epoch_ut1"] = conv.dt_utc2dt_ut1_smart(DForb["epoch_utc"],
                                                  DF_EOP_inp)

    ### Do the EOP interpolation
    DF_EOP_intrp = eop_interpotate(DF_EOP_inp, DForb["epoch_utc"])
    ### bring the EOP to radians
    Xeop = np.deg2rad(conv.arcsec2deg(DF_EOP_intrp['x']))
    Yeop = np.deg2rad(conv.arcsec2deg(DF_EOP_intrp['y']))

    TRFstk = []

    for tt, ut1, xeop, yeop, x, y, z in zip(DForb["epoch_tt"],
                                            DForb["epoch_ut1"], Xeop, Yeop,
                                            DForb['x'], DForb['y'],
                                            DForb['z']):

        MatCRF22TRF = sofa.iau_c2t06a(2400000.5, conv.dt2MJD(tt), 2400000.5,
                                      conv.dt2MJD(ut1), xeop, yeop)
        if inv_trf2crf:
            MatCRF22TRF = np.linalg.inv(MatCRF22TRF)

        CRF = np.array([x, y, z])
        TRF = np.dot(MatCRF22TRF, CRF)

        TRFstk.append(TRF)

    ### Final stack and replacement
    TRFall = np.vstack(TRFstk)
    DForb_out = DForb_inp.copy()
    DForb_out[["x", "y", "z"]] = TRFall

    return DForb_out
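A minimal usage sketch (assuming the EOP DataFrame comes from files_rw.read_eop_C04, as the note above suggests; the file path and DForb_crf input DataFrame are hypothetical):

DF_EOP = files_rw.read_eop_C04("EOP_C04_14.62-NOW.IAU2000A.txt")
DForb_trf = OrbDF_crf2trf(DForb_crf, DF_EOP, time_scale_inp="gps")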