def read_combi_sum_full(sum_full_file,
                        RMS_lines_output=True,
                        set_PRN_as_index=True):
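    """
    Read a GNSS orbit combination summary file ("sum full" file) and
    return its table as a DataFrame.

    Parameters
    ----------
    sum_full_file : str
        path to the combination summary file.
    RMS_lines_output : bool
        if True, also keep the RMS summary lines of the table.
    set_PRN_as_index : bool
        if True, use the PRN string as index of the DataFrame.

    Returns
    -------
    DF : Pandas DataFrame
        one row per satellite (plus the RMS lines if requested), with the
        combination date attached as the DF.date_mjd, DF.date_dt and
        DF.date_gps attributes.
    """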
    Vals_stk = []

    for l in open(sum_full_file):

        F = l.split()
        # remove the column separator tokens
        F = [f for f in F if f != "|"]

        ### Find date line
        if "MJD:" in l:
            date_line = l

        ### Skip useless lines
        if not "|" in l or "------" in l:
            continue

        ### Find AC list
        if "PRN" in l:
            ACs_list = F
            ACs_list.append("RMS_sat")
            ACs_list.append("PRN_str")
            ACs_list.append("CONST")

        elif F[0].isnumeric():
            Fout = [float(f) for f in F]
            Fout[0] = int(Fout[0])
            # Add the PRN string and the constellation
            Fout.append(prn_int_2_prn_str(int(Fout[0])))
            Fout.append(Fout[-1][0])

            Vals_stk.append(Fout)

        elif "RMS" in F[0] and RMS_lines_output:
            Fout = [float(f) for f in F[1:]]
            Fout.append(np.nan)
            Fout.insert(0, F[0])
            # Add a fake PRN string and constellation for the RMS line
            Fout.append(F[0])
            Fout.append(None)

            Vals_stk.append(Fout)

    DF = pd.DataFrame(Vals_stk, columns=ACs_list)

    ### Date management
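    # MJD counts days from 1858-11-17 00:00; e.g. MJD 60000 is 2023-02-25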
    mjd = float(date_line.split("MJD:")[1].split()[0])
    date_dt = conv.MJD2dt(mjd)

    DF.date_mjd = mjd
    DF.date_dt = date_dt

    DF.date_gps = utils.join_improved("", conv.dt2gpstime(date_dt))

    if set_PRN_as_index:
        DF.set_index("PRN_str", inplace=True)

    return DF
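
# A minimal usage sketch for read_combi_sum_full, assuming a combination
# summary file is available locally (the path below is hypothetical) and
# that prn_int_2_prn_str yields identifiers such as "G01":
#
#     DF = read_combi_sum_full("/data/combi/sum_full_20551.sum")
#     print(DF.date_dt)      # combination epoch as a datetime
#     print(DF.loc["G01"])   # per-AC values for satellite G01
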
def compar_orbit(Data_inp_1,
                 Data_inp_2,
                 step_data=900,
                 sats_used_list=['G'],
                 name1='',
                 name2='',
                 use_name_1_2_for_table_name=False,
                 RTNoutput=True,
                 convert_ECEF_ECI=True,
                 clean_null_values=True,
                 conv_coef=10**3,
                 return_satNull=False):
    """
    Compares 2 GNSS orbits files (SP3), and gives a summary plot and a
    statistics table

    Parameters
    ----------
    Data_inp_1 & Data_inp_2 : str or Pandas DataFrame
        the orbits, or the path (string) to the SP3 file

    step_data : int
        sampling rate of the data, in seconds. The default is 900.

    sats_used_list : list of str
        constellations or satellites to use: 'G', 'E', 'R', 'C' ... or
        'E01', 'G02' ... Individual satellites take priority over whole
        constellations, e.g. ['G', "E04"]


    RTNoutput : bool
        select output, Radial Transverse Normal or XYZ

    convert_ECEF_ECI : bool
        convert the SP3 from ECEF to ECI; must be True in operational use!

    name1 & name2 : str, optional
        custom names for the 2 orbits

    use_name_1_2_for_table_name : bool
        if True, use name1 and name2 in the table name;
        if False, use the data file names instead

    clean_null_values : bool or str
        if True or "all", remove a sat position when all of its X, Y, Z
        values are null (0.000000)
        if "any", remove a sat position when any of X, Y, Z is null
        if False, keep everything
        
    conv_coef : int
        conversion coefficient applied to the differences:
        km to m is 10**3, km to mm is 10**6

    return_satNull : bool
        if True, also return the list of satellites with null positions

    Returns
    -------
    Diff_sat_all : Pandas DataFrame
        contains the differences between Data_inp_1 & Data_inp_2
        in the Radial/Transverse/Normal or XYZ frame

        Attributes of Diff_sat_all :
            Diff_sat_all.name : title of the table

        If return_satNull is True, the list of null-position satellites
        is also returned as a second output.

    Note
    ----
    clean_null_values is useful (and necessary) only if
    convert_ECEF_ECI = False.
    If convert_ECEF_ECI = True, the cleaning is done by a side effect:
    the ECEF => ECI conversion generates NaN for zero-valued positions.
    Nevertheless, activating clean_null_values = True remains safer.
    This note matters mainly if you want to see bad positions on a plot:
    then set convert_ECEF_ECI = False and clean_null_values = False.

    Source
    ------
    "Coordinate Systems", ASEN 3200 1/24/06 George H. Born

    """

    # selection of both used Constellations AND satellites
    const_used_list = []
    sv_used_list = []
    for sat in sats_used_list:
        if len(sat) == 1:
            const_used_list.append(sat)
        elif len(sat) == 3:
            sv_used_list.append(sat)
            if not sat[0] in const_used_list:
                const_used_list.append(sat[0])

    # Read the files or DataFrames
    # metadata attributes are not copied
    # Thus, manual copy ...
    # (Dirty way, should be improved to avoid so many lines ...)
    if type(Data_inp_1) is str:
        D1orig = files_rw.read_sp3(Data_inp_1, epoch_as_pd_index=True)
    else:
        D1orig = Data_inp_1.copy(True)
        try:
            D1orig.name = Data_inp_1.name
        except AttributeError:
            D1orig.name = "no_name"
        try:
            D1orig.path = Data_inp_1.path
        except AttributeError:
            D1orig.path = "no_path"
        try:
            D1orig.filename = Data_inp_1.filename
        except AttributeError:
            D1orig.filename = "no_filename"

    if type(Data_inp_2) is str:
        D2orig = files_rw.read_sp3(Data_inp_2, epoch_as_pd_index=True)
    else:
        D2orig = Data_inp_2.copy(True)
        try:
            D2orig.name = Data_inp_2.name
        except AttributeError:
            D2orig.name = "no_name"
        try:
            D2orig.path = Data_inp_2.path
        except AttributeError:
            D2orig.path = "no_path"
        try:
            D2orig.filename = Data_inp_2.filename
        except AttributeError:
            D2orig.filename = "no_filename"

    #### NB : It has been decided with GM that the index of a SP3 dataframe
    ####      will be integers, not epoch datetime anymore
    ####      BUT here, for legacy reasons, the index has to be datetime

    if isinstance(D1orig.index[0], (int, np.integer)):
        D1orig.set_index("epoch", inplace=True)

    if isinstance(D2orig.index[0], (int, np.integer)):
        D2orig.set_index("epoch", inplace=True)

    Diff_sat_stk = []

    # This block is for removing null values
    if clean_null_values:
        if clean_null_values == "all":
            all_or_any = np.all
        elif clean_null_values == "any":
            all_or_any = np.any
        else:
            all_or_any = np.all

        xyz_lst = ['x', 'y', 'z']

        D1_null_bool = all_or_any(np.isclose(D1orig[xyz_lst], 0.), axis=1)
        D2_null_bool = all_or_any(np.isclose(D2orig[xyz_lst], 0.), axis=1)

        D1 = D1orig[np.logical_not(D1_null_bool)]
        D2 = D2orig[np.logical_not(D2_null_bool)]

        if np.any(D1_null_bool) or np.any(D2_null_bool):
            # collect the null-position sats of both files
            sat_nul = sorted(set(D1orig[D1_null_bool]["sat"])
                             | set(D2orig[D2_null_bool]["sat"]))
            print("WARN : Null values contained in SP3 files : ")
            print(
                "f1:", np.sum(D1_null_bool),
                utils.join_improved(" ",
                                    *list(set(D1orig[D1_null_bool]["sat"]))))
            print(
                "f2:", np.sum(D2_null_bool),
                utils.join_improved(" ",
                                    *list(set(D2orig[D2_null_bool]["sat"]))))
        else:
            sat_nul = []

    else:
        D1 = D1orig.copy()
        D2 = D2orig.copy()
        sat_nul = []

    for constuse in const_used_list:
        D1const = D1[D1['const'] == constuse]
        D2const = D2[D2['const'] == constuse]

        # checking if the data correspond to the step
        bool_step1 = np.mod((D1const.index - np.min(D1.index)).seconds,
                            step_data) == 0
        bool_step2 = np.mod((D2const.index - np.min(D2.index)).seconds,
                            step_data) == 0

        D1window = D1const[bool_step1]
        D2window = D2const[bool_step2]

        # find common sats and common epochs
        sv_set = sorted(
            list(set(D1window['sv']).intersection(set(D2window['sv']))))
        epoc_set = sorted(
            list(set(D1window.index).intersection(set(D2window.index))))

        # if a special selection of sats is requested, then apply it
        # (it is late and this selection is incredibly complicated ...)
        if any(constuse in e for e in sv_used_list):
            # first find the selected sats for the good constellation
            sv_used_select_list = [
                int(e[1:]) for e in sv_used_list if constuse in e
            ]
            #and apply it
            sv_set = sorted(
                list(set(sv_set).intersection(set(sv_used_select_list))))

        for svv in sv_set:
            # First search: find the corresponding epochs for the SV.
            # This is sufficient if there are no gaps (e.g. with 0.00000),
            # i.e. the same number of obs in the 2 files.
            # NB : .reindex() is smart, it fills the DataFrame
            # with NaN
            try:
                D1sv_orig = D1window[D1window['sv'] == svv].reindex(epoc_set)
                D2sv_orig = D2window[D2window['sv'] == svv].reindex(epoc_set)
            except Exception as exce:
                print("ERR : Unable to re-index with an unique epoch")
                print(
                    "      are you sure there is no multiple-defined epochs for the same sat ?"
                )
                print(
                    "      it happens e.g. when multiple ACs are in the same DataFrame "
                )
                print(
                    "TIP : Filter the input Dataframe before calling this fct with"
                )
                print("      DF = DF[DF['AC'] == 'gbm']")
                raise exce

            # Second search: a safeguard in case of gaps.
            # In principle this step is redundant, because .reindex()
            # already fills the missing epochs with NaN
            if len(D1sv_orig) != len(D2sv_orig):
                print("INFO : different number of epochs for SV", svv,
                      len(D1sv_orig), len(D2sv_orig))
                epoc_sv_set = sorted(
                    list(
                        set(D1sv_orig.index).intersection(set(
                            D2sv_orig.index))))
                D1sv = D1sv_orig.loc[epoc_sv_set]
                D2sv = D2sv_orig.loc[epoc_sv_set]
            else:
                D1sv = D1sv_orig
                D2sv = D2sv_orig

            P1 = D1sv[['x', 'y', 'z']]
            P2 = D2sv[['x', 'y', 'z']]

            # Start ECEF => ECI
            if convert_ECEF_ECI:
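                # Rationale: the RTN decomposition below requires an
                # inertial frame. In ECEF the axes rotate with the Earth,
                # so finite differences of ECEF positions would mix the
                # orbital motion with the Earth's rotation.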
                # Backup because the columns xyz will be reassigned
                #D1sv_bkp = D1sv.copy()
                #D2sv_bkp = D2sv.copy()

                P1b = conv.ECEF2ECI(
                    np.array(P1),
                    conv.dt_gpstime2dt_utc(P1.index.to_pydatetime(),
                                           out_array=True))
                P2b = conv.ECEF2ECI(
                    np.array(P2),
                    conv.dt_gpstime2dt_utc(P2.index.to_pydatetime(),
                                           out_array=True))

                D1sv[['x', 'y', 'z']] = P1b
                D2sv[['x', 'y', 'z']] = P2b

                P1 = D1sv[['x', 'y', 'z']]
                P2 = D2sv[['x', 'y', 'z']]
            # End ECEF => ECI

            if not RTNoutput:
                # Compatible with the documentation +
                # empirically tested with OV software
                # it is  P1 - P2 (and not P2 - P1)
                Delta_P = P1 - P2

                Diff_sat = Delta_P.copy()
                Diff_sat.columns = ['dx', 'dy', 'dz']

            else:
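                # RTN frame construction (cf. the Born reference above):
                #   R = P1 / |P1|              radial unit vector
                #   V ~ finite-difference velocity of P1
                #   C = (R x V) / |R x V|      cross-track (normal) unit vector
                #   I = C x R                  along-track (transverse) unit vector
                # Delta_P is then projected on (R, I, C) to get (dr, dt, dn).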
                rnorm = np.linalg.norm(P1, axis=1)

                Vx = utils.diff_pandas(D1sv, 'x')
                Vy = utils.diff_pandas(D1sv, 'y')
                Vz = utils.diff_pandas(D1sv, 'z')

                V = pd.concat((Vx, Vy, Vz), axis=1)
                V.columns = ['vx', 'vy', 'vz']

                R = P1.divide(rnorm, axis=0)
                R.columns = ['xnorm', 'ynorm', 'znorm']

                H = pd.DataFrame(np.cross(R, V), columns=['hx', 'hy', 'hz'])
                hnorm = np.linalg.norm(H, axis=1)

                C = H.divide(hnorm, axis=0)
                C.columns = ['hxnorm', 'hynorm', 'hznorm']

                I = pd.DataFrame(np.cross(C, R), columns=['ix', 'iy', 'iz'])

                R_ar = np.array(R)
                I_ar = np.array(I)
                C_ar = np.array(C)

                Beta = np.stack((R_ar, I_ar, C_ar), axis=1)

                # Compatible with the documentation +
                # empirically tested with OV software
                # it is  P1 - P2 (and not P2 - P1)
                Delta_P = P1 - P2

                # Final determination
                Astk = []

                for i in range(len(Delta_P)):
                    A = np.dot(Beta[i, :, :], np.array(Delta_P)[i])
                    Astk.append(A)

                Diff_sat = pd.DataFrame(np.vstack(Astk),
                                        index=P1.index,
                                        columns=['dr', 'dt', 'dn'])

            Diff_sat = Diff_sat * conv_coef  # unit conversion, km => m or mm depending on conv_coef

            Diff_sat['const'] = [constuse] * len(Diff_sat.index)
            Diff_sat['sv'] = [svv] * len(Diff_sat.index)
            Diff_sat['sat'] = [constuse + str(svv).zfill(2)] * len(
                Diff_sat.index)

            Diff_sat_stk.append(Diff_sat)

    Diff_sat_all = pd.concat(Diff_sat_stk)
    Date = Diff_sat_all.index[0]

    # Attribute definition
    if RTNoutput:
        Diff_sat_all.frame_type = 'RTN'

        # Pandas doesn't manage iterables well as attributes,
        # so they are stored separately
        Diff_sat_all.frame_col_name1 = 'dr'
        Diff_sat_all.frame_col_name2 = 'dt'
        Diff_sat_all.frame_col_name3 = 'dn'

    else:
        # Pandas doesn't manage iterables well as attributes,
        # so they are stored separately
        Diff_sat_all.frame_col_name1 = 'dx'
        Diff_sat_all.frame_col_name2 = 'dy'
        Diff_sat_all.frame_col_name3 = 'dz'

        if convert_ECEF_ECI:
            Diff_sat_all.frame_type = 'ECI'
        else:
            Diff_sat_all.frame_type = 'ECEF'

    # Name definitions
    if name1:
        Diff_sat_all.name1 = name1
    else:
        Diff_sat_all.name1 = D1orig.name

    if name2:
        Diff_sat_all.name2 = name2
    else:
        Diff_sat_all.name2 = D2orig.name

    Diff_sat_all.filename1 = D1orig.filename
    Diff_sat_all.filename2 = D2orig.filename

    Diff_sat_all.path1 = D1orig.path
    Diff_sat_all.path2 = D2orig.path

    Diff_sat_all.name = ' '.join(
        ('Orbits comparison (' + Diff_sat_all.frame_type + ') b/w',
         Diff_sat_all.name1, '(ref.) and', Diff_sat_all.name2, ',',
         Date.strftime("%Y-%m-%d"), ', doy', str(conv.dt2doy(Date))))

    if return_satNull:
        return Diff_sat_all, sat_nul
    else:
        return Diff_sat_all
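
# A minimal usage sketch for compar_orbit, assuming two SP3 files of the
# same day are available locally (both paths are hypothetical):
#
#     Diff = compar_orbit("/data/sp3/gbm20552.sp3",
#                         "/data/sp3/grg20552.sp3",
#                         sats_used_list=['G'],
#                         RTNoutput=True)
#     print(Diff.name)                            # title of the table
#     print(Diff[['dr', 'dt', 'dn']].describe())  # per-component statistics
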
def regex_OR_from_list(listin):
    return "(" + utils.join_improved("|", *listin) + ")"
def sp3_overlap_creator(ac_list,
                        dir_in,
                        dir_out,
                        suffix_out_input=None,
                        overlap_size=7200,
                        force=False,
                        manage_missing_sats='common_sats_only',
                        eliminate_null_sat=True,
                        severe=False,
                        separated_systems_export=False,
                        first_date=None):
    """
    Generate an SP3 Orbit file with overlap based on the SP3s of the 
    days before and after
    
    Parameters
    ----------
    ac_list : list
        3-character codes of the ACs.
    dir_in : str
        where the input sp3 files are.
    dir_out : str
        where the output sp3 files will be written.
    suffix_out_input : str, optional
        last character of the 3-char code; if None, it stays the same as
        the input.
    overlap_size : int, optional
        overlap size, in seconds. The default is 7200 (2 hours).
    force : bool, optional
        force overwrite. The default is False.
    manage_missing_sats : str, optional
        'exclude' : generate a file with only the sats common to the
        3 days, i.e. exclude the missing sats
        ('common_sats_only' is accepted as a legacy alias)
        'extrapolate' : extrapolate the missing sats based on the
        first/last epochs
        The default is 'common_sats_only'.
    eliminate_null_sat : bool, optional
        eliminate null sat. The default is True.
    severe : bool, optional
        raise an exception if problem. The default is False.
    separated_systems_export : bool, optional
        export different sp3 for different system. The default is False.
    first_date : datetime, optional
        exclude the SP3 files before this epoch.

    Returns
    -------
    None.

    Note
    ----
    start/end dates are not implemented.
    When force is False, existing output files are skipped.
    """

    Dict_Lfiles_ac = dict()

    for ac in ac_list:
        Dict_Lfiles_ac[ac] = []
        Lfile = Dict_Lfiles_ac[ac]

        Extlist = ["sp3", "SP3", "sp3.gz", "SP3.gz"]
        for ext in Extlist:
            Lfile.extend(utils.find_recursive(dir_in, "*" + ac + "*" + ext))
        print("Nb of SP3 found for", ac, len(Lfile))

        if not suffix_out_input:
            suffix_out = ac
        else:
            suffix_out = ac[:2] + suffix_out_input

        D = []
        WWWWD = []

        for sp3 in Lfile:
            #wwwwd_str = os.path.basename(sp3)[3:8]
            #D.append(conv.gpstime2dt(int(wwwwd_str[:4]),int(wwwwd_str[4:])))

            dat = conv.sp3name2dt(sp3)
            D.append(dat)

        for dat in D[1:-1]:  #### for a manual selection, zip more than 2 lists
            try:
                print("***********", ac, dat)

                if first_date:
                    if dat < first_date:
                        print("INFO: SKIP date", dat)
                        continue

                wwwwd_str = conv.dt_2_sp3_datestr(dat).zfill(5)

                dat_bef = dat - dt.timedelta(days=1)
                dat_aft = dat + dt.timedelta(days=1)

                wwwwd_str_bef = utils.join_improved(
                    "", *conv.dt2gpstime(dat_bef)).zfill(5)
                wwwwd_str_aft = utils.join_improved(
                    "", *conv.dt2gpstime(dat_aft)).zfill(5)

                ###### check if exists
                dir_out_wk = os.path.join(dir_out, "wk" + str(wwwwd_str)[:4])
                utils.create_dir(dir_out_wk)
                fil_out = dir_out_wk + "/" + suffix_out + wwwwd_str + ".sp3"

                if not force and os.path.isfile(fil_out):
                    print("0))", fil_out, "exists, skipping...")
                    continue

                ### *************** STEP 1 ***************
                print("1)) Search for the days before/after")
                print("1))", dat_bef, dat_aft)

                p1 = utils.find_regex_in_list(wwwwd_str + ".sp3", Lfile, True)
                p_bef = utils.find_regex_in_list(wwwwd_str_bef + ".sp3", Lfile,
                                                 True)
                p_aft = utils.find_regex_in_list(wwwwd_str_aft + ".sp3", Lfile,
                                                 True)

                print("1)) Files found for the days before/after")
                print("0b)", p_bef)
                print("01)", p1)
                print("0a)", p_aft)

                if not p1 or not p_bef or not p_aft:
                    print("ERROR with day", dat)
                    continue

                SP3 = files_rw.read_sp3(p1)
                SP3_bef = files_rw.read_sp3(p_bef)
                SP3_aft = files_rw.read_sp3(p_aft)

                ### Filtering to keep P only
                SP3 = SP3[SP3.type == "P"]
                SP3_bef = SP3_bef[SP3_bef.type == "P"]
                SP3_aft = SP3_aft[SP3_aft.type == "P"]

                SP3_bef = SP3_bef[SP3_bef["epoch"] < SP3["epoch"].min()]
                SP3_aft = SP3_aft[SP3_aft["epoch"] > SP3["epoch"].max()]

                SP3concat = pd.concat((SP3_bef, SP3, SP3_aft))

                dat_filter_bef = dat - dt.timedelta(seconds=overlap_size)
                dat_filter_aft = dat + dt.timedelta(
                    seconds=overlap_size) + dt.timedelta(days=1)

                ### *************** STEP 2 ***************
                print("2)) dates of the overlap period before/after")
                print("2))", dat_filter_bef, dat_filter_aft)

                ### *************** STEP 3 ***************
                print("3)) Dates of: SP3 concatenated, before, current, after")
                print("3))", SP3concat["epoch"].min(),
                      SP3concat["epoch"].max())
                print("3b)", SP3_bef["epoch"].min(), SP3_bef["epoch"].max())
                print("31)", SP3["epoch"].min(), SP3["epoch"].max())
                print("3a)", SP3_aft["epoch"].min(), SP3_aft["epoch"].max())

                SP3concat = SP3concat[(SP3concat["epoch"] >= dat_filter_bef)
                                      & (SP3concat["epoch"] <= dat_filter_aft)]

                ########## HERE WE MANAGE THE MISSING SATS
                # 'common_sats_only' is kept as a legacy alias of 'exclude'
                if manage_missing_sats in ("exclude", "common_sats_only"):
                    print("4))", "remove missing sats ")
                    common_sats = set(SP3_bef["sat"]).intersection(
                        set(SP3["sat"])).intersection(set(SP3_aft["sat"]))
                    SP3concat = SP3concat[SP3concat["sat"].isin(common_sats)]
                elif manage_missing_sats == "extrapolate":
                    print("4))", "extrapolate missing sats ")
                    for iovl, SP3_ovl in enumerate((SP3_bef, SP3_aft)):
                        if iovl == 0:
                            backward = True
                            forward = False
                            backfor = "backward"
                        elif iovl == 1:
                            backward = False
                            forward = True
                            backfor = "forward"

                        Sats = set(SP3["sat"])
                        Sats_ovl = set(SP3_ovl["sat"])

                        Sats_miss = Sats.difference(Sats_ovl)
                        if not Sats_miss:
                            continue
                        print("4a)", "extrapolate missing sats", backfor,
                              Sats_miss)

                        SP3extrapo_in = SP3concat[SP3concat["sat"].isin(
                            Sats_miss)]

                        #step = utils.most_common(SP3concat["epoch"].diff().dropna())
                        #step = step.astype('timedelta64[s]').astype(np.int32)
                        step = 900  # sampling rate of the SP3 in seconds, assumed constant

                        SP3extrapo = reffram.extrapolate_sp3_DataFrame(
                            SP3extrapo_in,
                            step=step,
                            n_step=int(overlap_size / step),
                            backward=backward,
                            forward=forward,
                            until_backward=dat_filter_bef,
                            until_forward=dat_filter_aft,
                            return_all=False)

                        SP3concat = pd.concat((SP3concat, SP3extrapo))
                        print(SP3extrapo)

                else:
                    raise ValueError("check the manage_missing_sats value: "
                                     + str(manage_missing_sats))

                if eliminate_null_sat:
                    GoodSats = []
                    for sat in SP3concat["sat"].unique():
                        XYZvals = SP3concat[SP3concat["sat"] == sat][[
                            "x", "y", "z"
                        ]].sum(axis=1)

                        V = np.sum(np.isclose(XYZvals, 0)) / len(XYZvals)

                        if V < 0.50:
                            GoodSats.append(sat)
                        else:
                            print("6) eliminate because null position", sat)

                    SP3concat = SP3concat[SP3concat["sat"].isin(GoodSats)]

                ### *************** STEP 7 ***************
                print("7))", "Start/End Epoch of the concatenated file ")
                print("7))", SP3concat["epoch"].min(),
                      SP3concat["epoch"].max())

                #### All systems
                print("8)) outputed file")
                print(fil_out)
                write_sp3(SP3concat, fil_out)

                #### system separated
                if separated_systems_export:
                    for sys in SP3concat["const"].unique():
                        try:
                            SP3concat_sys = SP3concat[SP3concat["const"] ==
                                                      sys]
                            fil_out_sys = dir_out_wk + "/" + suffix_out[:2] + sys.lower(
                            ) + wwwwd_str.zfill(5) + ".sp3"
                            print("9)) outputed file")
                            print(fil_out_sys)
                            write_sp3(SP3concat_sys, fil_out_sys)
                        except Exception:
                            continue

            except KeyboardInterrupt:
                raise KeyboardInterrupt

            except Exception as e:
                if severe:
                    print("WARN:", e)
                    raise e
                else:
                    print("WARN: Error", e, "but no severe mode, continue...")
                    continue
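
# A minimal usage sketch for sp3_overlap_creator, assuming day-wise SP3
# files are stored under the (hypothetical) directories below:
#
#     sp3_overlap_creator(ac_list=["gbm"],
#                         dir_in="/data/sp3_in",
#                         dir_out="/data/sp3_ovl",
#                         overlap_size=7200,
#                         manage_missing_sats="exclude")
#
# For each day D, the function reads the SP3s of D-1, D and D+1, keeps
# 7200 s of overlap on each side of D, and writes the result under
# dir_out/wkWWWW/.
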
    """
Esempio n. 5
0
def sp3_overlap_creator(ac_list,
                        dir_in,
                        dir_out,
                        suffix_out_input=None,
                        overlap_size=7200,
                        force=False,
                        common_sats_only=True,
                        eliminate_null_sat=True,
                        severe=False,
                        separated_systems_export=False):
    """
    Generate an SP3 Orbit file with overlap based on the SP3s of the 
    days before and after
    
    Parameters
    ----------
    ac_list : list
        3-character codes of the ACs.
    dir_in : str
        where the input sp3 files are.
    dir_out : str
        where the output sp3 files will be written.
    suffix_out_input : str, optional
        last character of the 3-char code; if None, it stays the same as
        the input.
    overlap_size : int, optional
        overlap size, in seconds. The default is 7200 (2 hours).
    force : bool, optional
        force overwrite. The default is False.
    common_sats_only : bool, optional
        generate a file with only the sats common to the 3 days.
        The default is True.
    eliminate_null_sat : bool, optional
        eliminate null sat. The default is True.
    severe : bool, optional
        raise an exception if problem. The default is False.
    separated_systems_export : bool, optional
        export different sp3 for different system. The default is False.

    Returns
    -------
    None.

    """

    for ac in ac_list:
        Lfile = utils.find_recursive(dir_in, "*" + ac + "*sp3")

        if not suffix_out_input:
            suffix_out = ac
        else:
            suffix_out = ac[:2] + suffix_out_input

        D = []
        WWWWD = []

        for sp3 in Lfile:
            wwwwd_str = os.path.basename(sp3)[3:8]
            D.append(conv.gpstime2dt(int(wwwwd_str[:4]), int(wwwwd_str[4:])))

        for dat in D[1:-1]:  #### for a manual selection, zip more than 2 lists
            try:
                print("******", ac, dat)

                if conv.dt2gpstime(dat)[0] < 1800:
                    print("INFO: SKIP date before GPS week 1800:", dat)
                    continue

                wwwwd_str = conv.dt_2_sp3_datestr(dat)

                dat_bef = dat - dt.timedelta(days=1)
                dat_aft = dat + dt.timedelta(days=1)

                wwwwd_str_bef = utils.join_improved("",
                                                    *conv.dt2gpstime(dat_bef))
                wwwwd_str_aft = utils.join_improved("",
                                                    *conv.dt2gpstime(dat_aft))

                ###### check if it exists
                dir_out_wk = os.path.join(dir_out, "wk" + str(wwwwd_str)[:4])
                utils.create_dir(dir_out_wk)
                fil_out = dir_out_wk + "/" + suffix_out + wwwwd_str + ".sp3"

                if not force and os.path.isfile(fil_out):
                    print("0))", fil_out, "exsists, skipping...")
                    continue

                ### *************** STEP 1 ***************
                print("1)) Search for the days before/after")
                print("1))", dat_bef, dat_aft)

                p1 = utils.find_regex_in_list(wwwwd_str + ".sp3", Lfile, True)
                p_bef = utils.find_regex_in_list(wwwwd_str_bef + ".sp3", Lfile,
                                                 True)
                p_aft = utils.find_regex_in_list(wwwwd_str_aft + ".sp3", Lfile,
                                                 True)

                print("1)) Files found for the days before/after")
                print("0b)", p_bef)
                print("01)", p1)
                print("0a)", p_aft)

                if not p1 or not p_bef or not p_aft:
                    print("ERROR with day", dat)
                    continue

                SP3 = files_rw.read_sp3(p1)
                SP3_bef = files_rw.read_sp3(p_bef)
                SP3_aft = files_rw.read_sp3(p_aft)

                SP3_bef = SP3_bef[SP3_bef["epoch"] < SP3["epoch"].min()]
                SP3_aft = SP3_aft[SP3_aft["epoch"] > SP3["epoch"].max()]

                SP3concat = pd.concat((SP3_bef, SP3, SP3_aft))

                dat_filter_bef = dat - dt.timedelta(seconds=overlap_size)
                dat_filter_aft = dat + dt.timedelta(
                    seconds=overlap_size) + dt.timedelta(days=1)

                ### *************** STEP 2 ***************
                print("2)) dates of the overlap period before/after")
                print("2))", dat_filter_bef, dat_filter_aft)

                ### *************** STEP 3 ***************
                print("3)) Dates of: SP3 concatenated, before, current, after")
                print("3))", SP3concat["epoch"].min(),
                      SP3concat["epoch"].max())
                print("3b)", SP3_bef["epoch"].min(), SP3_bef["epoch"].max())
                print("31)", SP3["epoch"].min(), SP3["epoch"].max())
                print("3a)", SP3_aft["epoch"].min(), SP3_aft["epoch"].max())

                SP3concat = SP3concat[(SP3concat["epoch"] >= dat_filter_bef)
                                      & (SP3concat["epoch"] <= dat_filter_aft)]

                if common_sats_only:
                    common_sats = set(SP3_bef["sat"]).intersection(
                        set(SP3["sat"])).intersection(set(SP3_aft["sat"]))
                    SP3concat = SP3concat[SP3concat["sat"].isin(common_sats)]

                if eliminate_null_sat:
                    GoodSats = []
                    for sat in SP3concat["sat"].unique():
                        XYZvals = SP3concat[SP3concat["sat"] == sat][[
                            "x", "y", "z"
                        ]].sum(axis=1)

                        V = np.sum(np.isclose(XYZvals, 0)) / len(XYZvals)

                        if V < 0.50:
                            GoodSats.append(sat)
                        else:
                            print("6) eliminate because null position", sat)

                    SP3concat = SP3concat[SP3concat["sat"].isin(GoodSats)]

                ### *************** STEP 7 ***************
                print("7))", "Start/End Epoch of the concatenated file ")
                print("7))", SP3concat["epoch"].min(),
                      SP3concat["epoch"].max())

                #### All systems
                print("8)) outputed file")
                print(fil_out)
                write_sp3(SP3concat, fil_out)

                #### system separated
                if separated_systems_export:
                    for sys in SP3concat["const"].unique():
                        try:
                            SP3concat_sys = SP3concat[SP3concat["const"] ==
                                                      sys]
                            fil_out_sys = dir_out_wk + "/" + suffix_out[:2] + sys.lower(
                            ) + wwwwd_str + ".sp3"
                            print("9)) outputed file")
                            print(fil_out_sys)
                            write_sp3(SP3concat_sys, fil_out_sys)
                        except Exception:
                            continue

            except KeyboardInterrupt:
                raise KeyboardInterrupt

            except Exception as e:
                if severe:
                    raise e
                else:
                    print("ERR:", e, "- severe mode is off, continuing...")
                    continue
    """