def read_combi_sum_exclu(sum_file, return_as_df=True, use_intuitive_bool=True):
    """
    Read an IGS combination summary file and extract, for each PRN, whether
    the satellite was excluded from the combination.

    Parameters
    ----------
    sum_file : str
        Path of the combination summary file.
    return_as_df : bool, optional
        If True, return a one-row pandas DataFrame indexed by the epoch
        decoded from the file name; otherwise return a plain dict.
    use_intuitive_bool : bool, optional
        If True, invert the flag so True means "satellite kept" (intuitive
        reading) instead of "satellite excluded".

    Returns
    -------
    pandas.DataFrame or dict
        Mapping PRN string -> bool.
    """
    epoch = conv.sp3name2dt(sum_file)

    with open(sum_file) as fh:
        lines = fh.readlines()

    excluded_dic = dict()
    in_section = False
    separator_count = 0

    for line in lines:
        fields = line.split()

        # the useful PRN section is delimited by the first two "---|---" lines
        if "---|---" in line and separator_count < 2:
            in_section = not in_section
            separator_count += 1
            continue

        if not in_section:
            continue

        prn_raw = fields[0]

        # an "X" in the PRN field marks an excluded satellite
        exclu = "X" in prn_raw
        if use_intuitive_bool:
            exclu = not exclu

        prn_int = int(fields[0].replace("X", "").split()[0])
        prn_good = prn_int_2_prn_str(prn_int)
        excluded_dic[prn_good] = exclu

    if return_as_df:
        return pd.DataFrame(excluded_dic, index=[epoch])
    return excluded_dic
Ejemplo n.º 2
0
def _parse_erp_numeric_line(linha, ac, dt_delivery, trim_to_17=False):
    """
    Parse one numeric ERP record line into the standard output row.

    Prepends the AC code, converts the X/Y pole offsets and X/Y pole rates
    from 1e-6 arcsec to arcsec, optionally trims extra trailing columns,
    and appends the delivery date.
    """
    ERP_data = [float(v) for v in linha.split()]
    ERP_data.insert(0, ac)
    ERP_data[2] = ERP_data[2] * (10**-6)
    ERP_data[3] = ERP_data[3] * (10**-6)
    ERP_data[13] = ERP_data[13] * (10**-6)
    ERP_data[14] = ERP_data[14] * (10**-6)
    if trim_to_17:
        # keep only the 17 standard columns before appending the delivery date
        del ERP_data[17:]
    ERP_data.append(dt_delivery)
    return ERP_data


def read_erp(file_path_in, ac=None):
    """
    Read IGS Analysis Center ERP files

    Parameters
    ----------
    file_path_in :  str
        Path of the file in the local machine.

    ac :  str
        The analysis center that will be used.
        If not precised, will be the first 3 letters of the input name

    Returns
    -------
    out1 :  pandas table
        Returns a panda table format with the data extracted from the file.

    Note
    ----
    Columns name

    ('MJD','X-P (arcsec)', 'Y-P (arcsec)', 'UT1UTC (E-7S)','LOD (E-7S/D)','S-X (E-6" arcsec)','S-Y (E-6" arcsec)',
    'S-UT (E-7S)','S-LD (E-7S/D)','NR (E-6" arcsec)', 'NF (E-6" arcsec)', 'NT (E-6" arcsec)',
    'X-RT (arcsec/D)','Y-RT (arcsec/D)','S-XR (E-6" arcsec/D)','S-YR (E-6" arcsec/D)', 'C-XY', 'C-XT',
    'C-YT', 'DPSI', 'DEPS','S-DP','S-DE')
    """

    caminho_arq = file_path_in

    #### FIND DELIVERY DATE
    name = os.path.basename(caminho_arq)

    if not ac:
        ac = name[:3]

    # guess the delivery epoch from the file-name convention
    # (12 chars: legacy sp3 name, 38 chars: long v3 name)
    if len(name) == 12:
        dt_delivery = conv.sp3name2dt(caminho_arq)
    elif len(name) == 38:
        dt_delivery = conv.sp3name_v3_2dt(caminho_arq)
    else:
        dt_delivery = conv.posix2dt(0)

    with open(caminho_arq, 'r') as le:
        letudo = le.readlines()
    tamanho = len(letudo)  # number of lines in the file

    numeros = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')

    ERP = []

    if caminho_arq[-3:] in ('snx', 'ssc'):
        # SINEX-like input: extract XPO/YPO/LOD from the SOLUTION/ESTIMATE block
        XPO_stk = []
        XPO_std_stk = []
        YPO_stk = []
        YPO_std_stk = []
        LOD_stk = []
        LOD_std_stk = []
        MJD_stk = []
        marker = False

        for line in letudo:
            if not line.strip():
                continue

            first_word = line.split()[0]
            if first_word == '+SOLUTION/ESTIMATE':
                marker = True
            if first_word == '-SOLUTION/ESTIMATE':
                marker = False

            if marker and utils.contains_word(line, 'XPO'):
                Date = conv.datestr_sinex_2_dt(line.split()[5])
                # estimate and std are in mas -> converted to arcsec
                XPO_stk.append(float(line[47:68]) * (10**-3))
                XPO_std_stk.append(float(line[69:80]) * (10**-3))
                MJD_stk.append(conv.dt2MJD(Date))

            if marker and utils.contains_word(line, 'YPO'):
                Date = conv.datestr_sinex_2_dt(line.split()[5])
                YPO_stk.append(float(line[47:68]) * (10**-3))
                YPO_std_stk.append(float(line[69:80]) * (10**-3))
                MJD_stk.append(conv.dt2MJD(Date))

            if marker and utils.contains_word(line, 'LOD'):
                Date = conv.datestr_sinex_2_dt(line.split()[5])
                LOD_stk.append(float(line[47:68]) * (10**+4))
                LOD_std_stk.append(float(line[69:80]) * (10**+4))
                MJD_stk.append(conv.dt2MJD(Date))

        MJD = list(sorted(set(MJD_stk)))
        if len(LOD_stk) == 0:
            LOD_stk = ['0'] * len(MJD)
            LOD_std_stk = ['0'] * len(MJD)

        # NOTE(review): assumes XPO/YPO/LOD entries are aligned one-to-one
        # with the sorted unique MJDs — confirm for multi-epoch SINEX files
        for i in range(len(MJD)):
            ERP.append([
                ac, MJD[i], XPO_stk[i], YPO_stk[i], 0, LOD_stk[i],
                XPO_std_stk[i], YPO_std_stk[i], 0, LOD_std_stk[i], 0, 0, 0, 0,
                0, 0, 0, dt_delivery
            ])

    # NOTE: the AC branches below are mutually exclusive (elif). A previous
    # version listed 'grg', 'mit', 'sio' in two non-exclusive branches, which
    # appended every record twice for those ACs.
    if ac in ('COD', 'cod', 'com', 'cof', 'grg', 'mit', 'sio', 'igs', 'igr'):
        # these ACs provide extra trailing columns: trim to the 17 standard ones
        for i in range(1, tamanho + 1):
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual[0:1] in numeros:
                ERP.append(_parse_erp_numeric_line(linhaatual, ac, dt_delivery,
                                                   trim_to_17=True))
        linecache.clearcache()

    elif ac in ('wum', 'esa', 'ngs', 'gbm', 'gfz', 'gfr', "p1_", "p1r"):
        for i in range(1, tamanho + 1):
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual[0:1] in numeros:
                ERP.append(_parse_erp_numeric_line(linhaatual, ac, dt_delivery))
        linecache.clearcache()

    elif ac == 'emr':
        # previous version wrote `ac in ('emr')` (a substring test, not tuple
        # membership) and compared the raw getline() output — which keeps the
        # trailing newline — to 'EOP  SOLUTION', so the header was never found
        header_found = False
        for i in range(1, tamanho + 1):
            linhaatual = linecache.getline(caminho_arq, i)
            if linhaatual.strip() == 'EOP  SOLUTION':
                header_found = True
            if header_found and linhaatual[0:1] in numeros:
                ERP.append(_parse_erp_numeric_line(linhaatual, ac, dt_delivery,
                                                   trim_to_17=True))
        linecache.clearcache()

    Erp_end = pd.DataFrame(ERP,
                           columns=[
                               'AC', 'MJD', 'X-P', 'Y-P', 'UT1UTC(UT1 -TAI)',
                               'LOD', 'S-X', 'S-Y', 'S-UT', 'S-LD', 'NR', 'NF',
                               'NT', 'X-RT', 'Y-RT', 'S-XR', 'S-YR',
                               'Delivered_date'
                           ])

    return Erp_end
Ejemplo n.º 3
0
def sp3_overlap_creator(ac_list,
                        dir_in,
                        dir_out,
                        suffix_out_input=None,
                        overlap_size=7200,
                        force=False,
                        manage_missing_sats='common_sats_only',
                        eliminate_null_sat=True,
                        severe=False,
                        separated_systems_export=False,
                        first_date=None):
    """
    Generate an SP3 Orbit file with overlap based on the SP3s of the 
    days before and after
    
    Parameters
    ----------
    ac_list : list
        3-character codes of the ACs.
    dir_in : str
        where the input sp3 are.
    dir_out : str
         where the output sp3 will be outputed.
    suffix_out_input : str, optional
        last char of the 3-char. code. if None, then it is the same as input.
    overlap_size : int, optional
        Overlapsize. The default is 7200.
    force : True, optional
        force overwrite. The default is False.
    manage_missing_sats : str, optional
        'exclude' (or its alias 'common_sats_only') : generate a file with
        only the common sat between the 3 days. Thus, exclude the missing sats
        'extrapolate' : extrapolate the missing sats based on the first/last epoch
        The default is 'common_sats_only'.
    eliminate_null_sat : bool, optional
        eliminate null sat. The default is True.
    severe : bool, optional
        raise an exception if problem. The default is False.
    separated_systems_export : bool, optional
        export different sp3 for different system. The default is False.
    first_date : datetime, optional
        exclude SP3 before this epoch

    Returns
    -------
    None.

    Note
    ----
    start/end date are not implemented
    the force option skips existing files 

    """

    Dict_Lfiles_ac = dict()

    for ac in ac_list:
        Dict_Lfiles_ac[ac] = []
        Lfile = Dict_Lfiles_ac[ac]

        # extend (not rebind) so Dict_Lfiles_ac[ac] actually holds the files
        Extlist = ["sp3", "SP3", "sp3.gz", "SP3.gz"]
        for ext in Extlist:
            Lfile.extend(utils.find_recursive(dir_in, "*" + ac + "*" + ext))
        print("Nb of SP3 found for", ac, len(Lfile))

        if not suffix_out_input:
            suffix_out = ac
        else:
            suffix_out = ac[:2] + suffix_out_input

        # one datetime per input SP3, decoded from the file name
        D = []
        for sp3 in Lfile:
            D.append(conv.sp3name2dt(sp3))

        # skip the first/last day: they have no neighbor on one side
        for dat in D[1:-1]:
            try:
                print("***********", ac, dat)

                if first_date:
                    if dat < first_date:
                        print("INFO: SKIP date", dat)
                        continue

                wwwwd_str = conv.dt_2_sp3_datestr(dat).zfill(5)

                dat_bef = dat - dt.timedelta(days=1)
                dat_aft = dat + dt.timedelta(days=1)

                wwwwd_str_bef = utils.join_improved(
                    "", *conv.dt2gpstime(dat_bef)).zfill(5)
                wwwwd_str_aft = utils.join_improved(
                    "", *conv.dt2gpstime(dat_aft)).zfill(5)

                ###### check if exists
                dir_out_wk = os.path.join(dir_out, "wk" + str(wwwwd_str)[:4])
                utils.create_dir(dir_out_wk)
                fil_out = dir_out_wk + "/" + suffix_out + wwwwd_str + ".sp3"

                if not force and os.path.isfile(fil_out):
                    print("0))", fil_out, "exists, skipping...")
                    continue

                ### *************** STEP 1 ***************
                print("1)) Search for the days before/after")
                print("1))", dat_bef, dat_aft)

                p1 = utils.find_regex_in_list(wwwwd_str + ".sp3", Lfile, True)
                p_bef = utils.find_regex_in_list(wwwwd_str_bef + ".sp3", Lfile,
                                                 True)
                p_aft = utils.find_regex_in_list(wwwwd_str_aft + ".sp3", Lfile,
                                                 True)

                print("1)) Files found for the days before/after")
                print("0b)", p_bef)
                print("01)", p1)
                print("0a)", p_aft)

                if not p1 or not p_bef or not p_aft:
                    print("ERROR with day", dat)
                    continue

                SP3 = files_rw.read_sp3(p1)
                SP3_bef = files_rw.read_sp3(p_bef)
                SP3_aft = files_rw.read_sp3(p_aft)

                ### Filtering to keep P (position) records only
                SP3 = SP3[SP3.type == "P"]
                SP3_bef = SP3_bef[SP3_bef.type == "P"]
                SP3_aft = SP3_aft[SP3_aft.type == "P"]

                # keep only the neighbor epochs strictly outside the central day
                SP3_bef = SP3_bef[SP3_bef["epoch"] < SP3["epoch"].min()]
                SP3_aft = SP3_aft[SP3_aft["epoch"] > SP3["epoch"].max()]

                SP3concat = pd.concat((SP3_bef, SP3, SP3_aft))

                dat_filter_bef = dat - dt.timedelta(seconds=overlap_size)
                dat_filter_aft = dat + dt.timedelta(
                    seconds=overlap_size) + dt.timedelta(days=1)

                ### *************** STEP 2 ***************
                print("2)) dates of the overlap period before/after")
                print("2))", dat_filter_bef, dat_filter_aft)

                ### *************** STEP 3 ***************
                print("3)) Dates of: SP3 concatenated, before, current, after")
                print("3))", SP3concat["epoch"].min(),
                      SP3concat["epoch"].max())
                print("3b)", SP3_bef["epoch"].min(), SP3_bef["epoch"].max())
                print("31)", SP3["epoch"].min(), SP3["epoch"].max())
                print("3a)", SP3_aft["epoch"].min(), SP3_aft["epoch"].max())

                SP3concat = SP3concat[(SP3concat["epoch"] >= dat_filter_bef)
                                      & (SP3concat["epoch"] <= dat_filter_aft)]

                ########## HERE WE MANAGE THE MISSING SATS
                # NOTE: a previous version only accepted 'exclude'/'extrapolate'
                # and raised on the documented default 'common_sats_only'
                if manage_missing_sats in ("exclude", "common_sats_only"):
                    print("4))", "remove missing sats ")
                    common_sats = set(SP3_bef["sat"]).intersection(
                        set(SP3["sat"])).intersection(set(SP3_aft["sat"]))
                    SP3concat = SP3concat[SP3concat["sat"].isin(common_sats)]
                elif manage_missing_sats == "extrapolate":
                    print("4))", "extrapolate missing sats ")
                    for iovl, SP3_ovl in enumerate((SP3_bef, SP3_aft)):
                        if iovl == 0:
                            backward = True
                            forward = False
                            backfor = "backward"
                        elif iovl == 1:
                            backward = False
                            forward = True
                            backfor = "forward"

                        Sats = set(SP3["sat"])
                        Sats_ovl = set(SP3_ovl["sat"])

                        Sats_miss = Sats.difference(Sats_ovl)
                        if not Sats_miss:
                            continue
                        print("4a)", "extrapolate missing sats", backfor,
                              Sats_miss)

                        SP3extrapo_in = SP3concat[SP3concat["sat"].isin(
                            Sats_miss)]

                        # fixed 900 s step; the commented alternative would
                        # derive it from the epoch spacing of the data
                        #step = utils.most_common(SP3concat["epoch"].diff().dropna())
                        #step = step.astype('timedelta64[s]').astype(np.int32)
                        step = 900

                        SP3extrapo = reffram.extrapolate_sp3_DataFrame(
                            SP3extrapo_in,
                            step=step,
                            n_step=int(overlap_size / step),
                            backward=backward,
                            forward=forward,
                            until_backward=dat_filter_bef,
                            until_forward=dat_filter_aft,
                            return_all=False)

                        SP3concat = pd.concat((SP3concat, SP3extrapo))
                        print(SP3extrapo)

                else:
                    print("ERR: check manage_missing_sats value")
                    raise Exception

                if eliminate_null_sat:
                    # a satellite is "null" when most of its positions are
                    # exactly (0,0,0); drop it if >= 50% of epochs are null
                    GoodSats = []
                    for sat in SP3concat["sat"].unique():
                        XYZvals = SP3concat[SP3concat["sat"] == sat][[
                            "x", "y", "z"
                        ]].sum(axis=1)

                        V = np.sum(np.isclose(XYZvals, 0)) / len(XYZvals)

                        if V < 0.50:
                            GoodSats.append(sat)
                        else:
                            print("6) eliminate because null position", sat)

                    SP3concat = SP3concat[SP3concat["sat"].isin(GoodSats)]

                ### *************** STEP 7 ***************
                print("7))", "Start/End Epoch of the concatenated file ")
                print("7))", SP3concat["epoch"].min(),
                      SP3concat["epoch"].max())

                #### All systems
                print("8)) outputed file")
                print(fil_out)
                write_sp3(SP3concat, fil_out)

                #### system separated
                # NOTE: a previous version had this behind `if False:`, so the
                # separated_systems_export parameter was silently ignored
                if separated_systems_export:
                    for sys in SP3concat["const"].unique():
                        try:
                            SP3concat_sys = SP3concat[SP3concat["const"] ==
                                                      sys]
                            fil_out_sys = dir_out_wk + "/" + suffix_out[:2] + sys.lower(
                            ) + wwwwd_str.zfill(5) + ".sp3"
                            print("9)) outputed file")
                            print(fil_out_sys)
                            write_sp3(SP3concat_sys, fil_out_sys)
                        except Exception:
                            # best-effort per-system export: skip on failure
                            continue

            except KeyboardInterrupt:
                raise KeyboardInterrupt

            except Exception as e:
                if severe:
                    print("WARN:", e)
                    raise e
                else:
                    print("WARN: Error", e, "but no severe mode, continue...")
                    continue
Ejemplo n.º 4
0
def read_pdm_res_slr_mono(res_file_in, sol="sol"):
    """
    Read a PDM7 res(idual) file for SLR Validation
    
    Parameters
    ----------
    res_file_in : str
        path of the input res file.
        
    sol : str or lambda fct
        solution name
        if it is a lambda fct, it will grab the sol name from the full residual path
        e.g. : solnam = lambda x: x.split("/")[-5][4:]

    Returns
    -------
    DFout : Pandas DataFrame
        output DataFrame.

    """

    # epoch of the file, decoded from its SP3-like basename (a dummy 3-char
    # prefix is added so the converter sees a regular sp3 name)
    dat = conv.sp3name2dt("xxx" + os.path.basename(res_file_in)[:5])

    ### get useful values between the +residuals / -residuals markers
    L = utils.extract_text_between_elements_2(res_file_in, r"\+residuals",
                                              r"\-residuals")
    L = L[3:-1]  # drop the header lines and the closing marker

    output = io.StringIO()
    output.write("".join(L))
    output.seek(0)

    ### solution name: either given directly or derived from the path
    if utils.is_lambda(sol):
        sol_wrk = sol(res_file_in)
    else:
        sol_wrk = sol

    ### read
    DFout = pd.read_csv(output, header=None, delim_whitespace=True)

    ### rename useful columns
    DFout = DFout.rename(
        columns={
            0: 'time',
            1: 'sta',
            2: 'sat',
            4: 'res',
            5: 'elev',
            6: 'azi',
            7: 'amb',
            8: 'slr',
            9: 'dt_sta',
            10: 'delay',
            11: 'sig_sta'
        })

    DFout["day"] = dat
    # 'time' is a fraction of day: convert to microseconds
    # (86400 s/day * 1e6 us/s). A previous version multiplied by
    # 10 * 6 (= 60) instead of 10**6, making the epochs ~16667x too small.
    DFout["epoc"] = dat + DFout["time"].apply(
        lambda x: dt.timedelta(microseconds=x * 86400 * 10**6))
    DFout['sol'] = sol_wrk
    DFout["sys"] = DFout["sat"].str[0]

    return DFout