def parseClockBias(statsClk: tempfile._TemporaryFileWrapper, logger: logging.Logger) -> pd.DataFrame:
    """
    Parse the RTKLib clock statistics file into a DataFrame.

    :param statsClk: temporary file holding the RTKLib clock statistics (comma separated)
    :param logger: logger used for progress reporting
    :return: DataFrame with the clock columns from rtkc.dRTKPosStat['Clk']['useCols'],
             zero-valued clock parameters replaced by NaN, and a 'DT' datetime column
             derived from GPS week number (WNC) and time of week (TOW)
    """
    cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')

    logger.info('{func:s}: parsing RTKLib clock statistics {file:s}'.format(func=cFuncName, file=statsClk.name))

    # read in the clock statistics file; column 0 is a record tag, so only columns 1..8 are used
    dfCLKs = pd.read_csv(statsClk.name, header=None, sep=',', usecols=[*range(1, 9)])
    dfCLKs.columns = rtkc.dRTKPosStat['Clk']['useCols']
    amutils.printHeadTailDataFrame(df=dfCLKs, name='dfCLKs range')

    # the last 4 columns hold the clock parameters; a value of exactly 0 means
    # the parameter was not estimated, so mark it as NaN
    cols = np.asarray(rtkc.dRTKPosStat['Clk']['useCols'][-4:])
    dfCLKs[cols] = dfCLKs[cols].replace({0: np.nan})

    # add DateTime column converted from GPS week number / time of week
    dfCLKs['DT'] = dfCLKs.apply(lambda x: gpstime.UTCFromWT(x['WNC'], x['TOW']), axis=1)

    amc.logDataframeInfo(df=dfCLKs, dfName='dfCLKs', callerName=cFuncName, logger=logger)
    amutils.logHeadTailDataFrame(logger=logger, callerName=cFuncName, df=dfCLKs, dfName='dfCLKs')

    return dfCLKs
def parse_glab_output(glab_output: tempfile._TemporaryFileWrapper, logger: logging.Logger) -> pd.DataFrame:
    """
    Parse the OUTPUT section of a gLAB out file into a DataFrame.

    :param glab_output: temporary file holding the gLAB OUTPUT section (whitespace separated)
    :param logger: logger used for progress reporting
    :return: DataFrame with the columns from glc.dgLab['OUTPUT']['use_cols'], plus a
             'DT' datetime column, a 'dt_diff' time-gap column, and 'UTM.E'/'UTM.N'
             coordinates; '#SVs' and 'PDOP' are set to NaN across detected data gaps
    """
    import io  # local: only needed to capture DataFrame.info() output

    cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')

    logger.info('{func:s}: Parsing gLab OUTPUT section {file:s} ({info:s})'.format(func=cFuncName, file=glab_output.name, info=colored('be patient', 'red')))

    # read gLAB's OUTPUT into a dataframe, cropping the cartesian columns (11..19)
    df_output = pd.read_csv(glab_output.name, header=None, delim_whitespace=True, usecols=[*range(1, 11), *range(20, len(glc.dgLab['OUTPUT']['columns']))])

    # name the columns
    df_output.columns = glc.dgLab['OUTPUT']['use_cols']

    # transform time column to python datetime.time and add a DT column
    df_output['Time'] = df_output['Time'].apply(lambda x: dt.datetime.strptime(x, '%H:%M:%S.%f').time())
    df_output['DT'] = df_output.apply(lambda x: make_datetime(x['Year'], x['DoY'], x['Time']), axis=1)

    # find gaps in the data by comparing to mean value of difference in time
    df_output['dt_diff'] = df_output['DT'].diff(1)
    dtMean = df_output['dt_diff'].mean()

    # blank out '#SVs' and 'PDOP' across data gaps so plots do not interpolate over them
    df_output.loc[df_output['dt_diff'] > dtMean, '#SVs'] = np.nan
    df_output.loc[df_output['dt_diff'] > dtMean, 'PDOP'] = np.nan

    # add UTM coordinates
    df_output['UTM.E'], df_output['UTM.N'], _, _ = utm.from_latlon(df_output['lat'].to_numpy(), df_output['lon'].to_numpy())

    # DataFrame.info() prints to stdout and returns None; capture it in a buffer
    # so the information actually ends up in the log
    info_buf = io.StringIO()
    df_output.info(buf=info_buf)
    logger.info('{func:s}: df_output info\n{dtypes:s}'.format(dtypes=info_buf.getvalue(), func=cFuncName))
    amutils.printHeadTailDataFrame(df=df_output, name='OUTPUT section of {name:s}'.format(name=amc.dRTK['glab_out']), index=False)

    return df_output
def parseSatelliteStatistics(statsSat: tempfile._TemporaryFileWrapper, logger: logging.Logger) -> pd.DataFrame:
    """
    Read the RTKLib SAT statistics file into a DataFrame.

    :param statsSat: temporary file holding the RTKLib satellite statistics (comma separated)
    :param logger: logger used for progress reporting
    :return: DataFrame with the columns from rtkc.dRTKPosStat['Res']['useCols'], a 'DT'
             datetime column added, and PRres values of 0.0 replaced by NaN
    """
    # set current function name
    cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')

    logger.info('{func:s}: Parsing RTKLib satellites file {file:s} ({info:s})'.format(func=cFuncName, file=statsSat.name, info=colored('be patient', 'red')))

    # read the satellite statistics; column 0 is a record tag, so only columns 1..10 are used
    dfSat = pd.read_csv(statsSat.name, header=None, sep=',', usecols=[*range(1, 11)])
    dfSat.columns = rtkc.dRTKPosStat['Res']['useCols']
    amutils.printHeadTailDataFrame(df=dfSat, name='dfSat range')

    # add DT column converted from GPS week number / time of week
    dfSat['DT'] = dfSat.apply(lambda x: gpstime.UTCFromWT(x['WNC'], x['TOW']), axis=1)

    # if PRres == 0.0 then presumably only 4 SVs were used, so no residuals could be
    # calculated -> change to NaN (explicit column assignment instead of chained
    # attribute access with inplace=True, which is unsafe under pandas copy-on-write)
    dfSat['PRres'] = dfSat['PRres'].replace(0.0, np.nan)

    amc.logDataframeInfo(df=dfSat, dfName='dfSat', callerName=cFuncName, logger=logger)
    amutils.logHeadTailDataFrame(logger=logger, callerName=cFuncName, df=dfSat, dfName='dfSat')

    return dfSat
def statistics_dopbin(df_dop_enu: pd.DataFrame, logger: logging.Logger) -> dict:
    """
    Calculate the dENU coordinate statistics for each PDOP bin.

    :param df_dop_enu: DataFrame holding at least a 'PDOP' column and the dENU/sdENU
                       columns named in glc.dgLab['OUTPUT']
    :param logger: logger used for progress reporting
    :return: dict keyed by PDOP bin name ('binX-Y'); each entry holds the fraction
             ('perc') and count of epochs in the bin, plus per-dENU-coordinate
             weighted average, weighted stddev, median, min and max
    """
    cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')

    logger.info('{func:s}: calculating statistics of xDOP'.format(func=cFuncName))

    dStats_dop = {}
    amutils.printHeadTailDataFrame(df=df_dop_enu, name='df_dop_enu')

    # go over all PDOP bins defined in amc.dRTK['dop_bins']
    for i in range(len(amc.dRTK['dop_bins']) - 1):
        bin_PDOP = 'bin{:d}-{:.0f}'.format(amc.dRTK['dop_bins'][i], amc.dRTK['dop_bins'][i + 1])
        logger.debug('{func:s}: bin_PDOP = {bin!s}'.format(bin=bin_PDOP, func=cFuncName))

        # create the dict for this PDOP interval
        dStats_dop[bin_PDOP] = {}

        # boolean mask for the epochs whose PDOP falls in this (lo, hi] bin
        index4Bin = (df_dop_enu['PDOP'] > amc.dRTK['dop_bins'][i]) & (df_dop_enu['PDOP'] <= amc.dRTK['dop_bins'][i + 1])

        dStats_dop[bin_PDOP]['perc'] = index4Bin.mean()
        # sum of the boolean mask is the exact count; the previous
        # count() * mean() round-trip through float could truncate one off
        dStats_dop[bin_PDOP]['count'] = int(index4Bin.sum())

        for dENU, sdENU in zip(glc.dgLab['OUTPUT']['dENU'], glc.dgLab['OUTPUT']['sdENU']):
            dENU_stats = {}
            dENU_stats['wavg'] = amutils.wavg(df_dop_enu.loc[index4Bin], dENU, sdENU)
            dENU_stats['sdwavg'] = amutils.stddev(df_dop_enu.loc[index4Bin, dENU], dENU_stats['wavg'])
            dENU_stats['median'] = df_dop_enu.loc[index4Bin, dENU].median()
            dENU_stats['min'] = df_dop_enu.loc[index4Bin, dENU].min()
            dENU_stats['max'] = df_dop_enu.loc[index4Bin, dENU].max()

            # add for this crd dENU
            dStats_dop[bin_PDOP][dENU] = dENU_stats

            logger.debug('{func:s}: in {bin:s} statistics for {crd:s} are {stat!s}'.format(func=cFuncName, bin=bin_PDOP, crd=dENU, stat=dENU_stats))

    # report to the user
    logger.info('{func:s}: dStats_dop =\n{json!s}'.format(func=cFuncName, json=json.dumps(dStats_dop, sort_keys=False, indent=4, default=amutils.DT_convertor)))

    return dStats_dop
def statistics_coordinates(df_crd: pd.DataFrame, logger: logging.Logger) -> dict:
    """
    Calculate the coordinate statistics (llh, dENU and UTM).

    :param df_crd: DataFrame holding the llh, dENU, sdENU and UTM columns named in
                   glc.dgLab['OUTPUT']
    :param logger: logger used for progress reporting
    :return: dict keyed per coordinate with weighted average ('wavg'), weighted
             stddev ('sdwavg', converted to metres for lat/lon), mean, median, std,
             min, max and the last (Kalman-filter) value 'kf' with its stddev 'sdkf'
             (NaN when no matching sigma column exists)
    """
    cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')

    logger.info('{func:s}: calculating coordinate statistics'.format(func=cFuncName))

    # init class WGS84 (ellipsoid parameters used to convert angular stddev to metres)
    wgs_84 = wgs84.WGS84()

    amutils.printHeadTailDataFrame(df=df_crd, name='df_crd', index=False)

    dStat = {}
    for crd in (glc.dgLab['OUTPUT']['llh'] + glc.dgLab['OUTPUT']['dENU'] + glc.dgLab['OUTPUT']['UTM']):
        dStat[crd] = {}

    # make sure to have the wavg for latitude first since it is used for converting
    # the stddev of geodetic coordinates into metres
    dStat['lat']['wavg'] = amutils.wavg(df_crd, 'lat', 'dN0')

    for llh, sdENU in zip(glc.dgLab['OUTPUT']['llh'], glc.dgLab['OUTPUT']['sdENU']):
        dStat[llh]['wavg'] = amutils.wavg(df_crd, llh, sdENU)
        if llh == 'lat':
            # angular stddev -> metres along the meridian
            dStat[llh]['sdwavg'] = math.radians(amutils.stddev(df_crd[llh], dStat['lat']['wavg'])) * wgs_84.a
        elif llh == 'lon':
            # angular stddev -> metres along the parallel (scaled by cos(lat))
            dStat[llh]['sdwavg'] = math.radians(amutils.stddev(df_crd[llh], dStat['lat']['wavg'])) * wgs_84.a * math.cos(math.radians(dStat['lat']['wavg']))
        else:
            dStat[llh]['sdwavg'] = amutils.stddev(df_crd[llh], dStat[llh]['wavg'])

    for dENU, sdENU in zip(glc.dgLab['OUTPUT']['dENU'], glc.dgLab['OUTPUT']['sdENU']):
        dStat[dENU]['wavg'] = amutils.wavg(df_crd, dENU, sdENU)
        dStat[dENU]['sdwavg'] = amutils.stddev(df_crd[dENU], dStat[dENU]['wavg'])

    for dUTM, sdENU in zip(glc.dgLab['OUTPUT']['UTM'], glc.dgLab['OUTPUT']['sdENU'][:2]):
        dStat[dUTM]['wavg'] = amutils.wavg(df_crd, dUTM, sdENU)
        # BUGFIX: stddev was computed against dStat[dENU]['wavg'] using the stale
        # dENU variable from the previous loop; use this UTM coordinate's own wavg
        dStat[dUTM]['sdwavg'] = amutils.stddev(df_crd[dUTM], dStat[dUTM]['wavg'])

    # calculate statistics for the numeric values
    for crd in (glc.dgLab['OUTPUT']['llh'] + glc.dgLab['OUTPUT']['dENU'] + glc.dgLab['OUTPUT']['UTM']):
        dStat[crd]['mean'] = df_crd[crd].mean()
        dStat[crd]['median'] = df_crd[crd].median()
        dStat[crd]['std'] = df_crd[crd].std()
        dStat[crd]['max'] = df_crd[crd].max()
        dStat[crd]['min'] = df_crd[crd].min()

        # results of gLAB kalman filter: last epoch value and its sigma (if present)
        dStat[crd]['kf'] = df_crd[crd].iloc[-1]
        try:
            dStat[crd]['sdkf'] = df_crd['s{:s}'.format(crd[:2])].iloc[-1]
        except KeyError:
            dStat[crd]['sdkf'] = np.nan

    logger.info('{func:s}: OUTPUT statistics information =\n{json!s}'.format(func=cFuncName, json=json.dumps(dStat, sort_keys=False, indent=4, default=amutils.DT_convertor)))

    return dStat