import argparse, os, sys
import pickle

import numpy as np
import scipy
import matplotlib.pyplot as plt

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from utils import (get_xivo_output_filename, get_xivo_gt_filename,
                   get_stock_parser, chi2_divergence, state_size,
                   chi2_div_draws)
from pltutils import chi2_overlay
from constants import EVILOCARINA_DNNDUMP, KIF_DNNDUMP
from eval_cov_calibration import CovarianceCalibration


parser = get_stock_parser(
    'Generates plots for evaluation of covariance quality.',
    'Dataset options')

analysis_args = parser.add_argument_group('Analysis Options')
analysis_args.add_argument(
    '-cov_source',
    default='original',
    help='Covariance used to generate plots. Options are [ original | sampled | scalar | linear | neural_net ]')
analysis_args.add_argument('-network_model',
                           help='Tensorflow model and config')
analysis_args.add_argument('-network_dump',
                           default=EVILOCARINA_DNNDUMP,
                           help='Directory where network is located')
analysis_args.add_argument(
    '-sample_size',
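# --- Illustrative sketch, not part of the original script -------------------
# The chi2 overlay plots compare empirical NEES values against the reference
# chi-squared density. This toy example uses randomly generated errors and a
# fixed covariance (hypothetical data) purely to show the comparison; the real
# script obtains both from CovarianceCalibration.
import numpy as np
from scipy.stats import chi2
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
dof = 3
P_toy = np.eye(dof)                                    # stand-in covariance
err_toy = rng.multivariate_normal(np.zeros(dof), P_toy, size=500)

# NEES: e^T P^{-1} e for each error sample
nees = np.array([e @ np.linalg.solve(P_toy, e) for e in err_toy])

x = np.linspace(0.0, chi2.ppf(0.999, dof), 200)
plt.figure()
plt.hist(nees, bins=40, density=True, alpha=0.5, label="empirical NEES")
plt.plot(x, chi2.pdf(x, dof), label="chi2({}) pdf".format(dof))
plt.legend()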
import argparse, os, sys
import json

import numpy as np
import matplotlib.pyplot as plt

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from estimator_data import EstimatorData
from utils import from_upper_triangular_list, get_stock_parser, \
    get_xivo_output_filename, get_xivo_gt_filename


parser = get_stock_parser("Plots covariance matrix as colormap.")


class CovariancePlot:
    def __init__(self, estimator_results, seq):
        self.seq = seq
        self.est = EstimatorData(estimator_results)

    def plot_cov(self, ind):
        plt.figure()
        plt.title("{} covariance for timestep {}".format(self.seq, ind))
        ax = plt.gca()
        im = ax.imshow(self.est.P[:, :, ind], cmap='RdBu')
        ax.figure.colorbar(im, ax=ax)

    def user_select_plot(self):
        while True:
            print("Index range: {} to {}".format(self.est.start_ind,
                                                 self.est.end_ind))
            index = int(input("Select a timestep: "))
            self.plot_cov(index)
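# --- Illustrative sketch, not part of the original script -------------------
# One plausible way to drive CovariancePlot; the file is truncated above, so
# the real entry point is not shown. "room1" and cam_id=0 are placeholder
# choices, while args.dump and args.dataset follow the other scripts in this
# directory.
if __name__ == "__main__":
    args = parser.parse_args()
    results_file = get_xivo_output_filename(args.dump, args.dataset, "room1",
                                            cam_id=0)
    cov_plot = CovariancePlot(results_file, "room1")
    cov_plot.plot_cov(cov_plot.est.start_ind)   # show the first timestep
    plt.show()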
                if (m != i) and (m != d):
                    dgii_daid += Pest[d, m] * A[i, m]
            for l in range(cov_dim):
                if (l != i) and (l != d):
                    dgii_daid += Pest[l, d] * A[i, l]
            dgii_daid += 2 * Pest[d, d] * A[i, d]
            grad[i, d] += 2 * gijk * dgii_daid
        else:
            raise ValueError("We're not supposed to be here!")

    grad = np.reshape(grad, (cov_dim * cov_dim,))
    return grad


# Command line arguments
parser = get_stock_parser(
    "Compute a linear adjustment to the estimated covariances for a given sequence by solving a NLP")
parser.add_argument("-check_grad", default=False, action="store_true")
parser.add_argument("-algorithm", default="ipopt")
parser.add_argument("-estimate_jac", default=False, action="store_true")
parser.add_argument("-cov_type", default="W")
parser.add_argument("-use_weights", default=False, action="store_true")
args = parser.parse_args()

weights = {}
if args.use_weights:
    weights["weightW"] = 10.
    weights["weightT"] = 10.
    weights["weightV"] = 10.
    weights["weightWW"] = 2.5
    weights["weightTT"] = 2.5
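# --- Illustrative sketch, not part of the original script -------------------
# The -check_grad flag suggests the analytic gradient above is verified
# against finite differences. A minimal, self-contained example of that
# pattern with scipy.optimize.check_grad on a toy quadratic (toy_cost and
# toy_grad are hypothetical names, not the script's actual objective):
import numpy as np
from scipy.optimize import check_grad

Q = np.diag([1.0, 2.0, 3.0])

def toy_cost(x):
    return 0.5 * x @ Q @ x

def toy_grad(x):
    return Q @ x

x0 = np.array([0.3, -1.2, 0.7])
err = check_grad(toy_cost, toy_grad, x0)   # small value => gradient consistent
print("finite-difference gradient error:", err)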
import sys, os, argparse

import numpy as np
from scipy.spatial.transform import Rotation
import cvxpy as cvx

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from estimator_data import EstimatorData
from utils import from_upper_triangular_list, state_indices, get_stock_parser, \
    get_xivo_output_filename, idx_to_state


# Command line arguments
parser = get_stock_parser(
    "Solves an optimization problem to compute a single scalar adjustment to the estimated covariances for a single sequence.")
parser.add_argument("-use_weights", default=False, action="store_true")
parser.add_argument("-cov_type", default="WTV")
args = parser.parse_args()

estimator_datafiles = []
estimators = []
test_estimators = []
test_datafiles = []

# Load data
for cam_id in [0, 1]:
    for seq in ["room1", "room2", "room3", "room4", "room5", "room6"]:
        estimator_datafile = get_xivo_output_filename(args.dump,
                                                      args.dataset,
                                                      seq,
                                                      cam_id=cam_id)
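# --- Illustrative sketch, not part of the original script -------------------
# The kind of problem this script sets up: find a single positive scale s so
# that s times the predicted variances best matches observed squared errors in
# a least-squares sense. Toy data only; the real script builds its residuals
# from EstimatorData and the ground truth.
import numpy as np
import cvxpy as cvx

rng = np.random.default_rng(1)
pred_var = rng.uniform(0.5, 2.0, size=100)                       # toy variances
sq_err = 3.0 * pred_var + 0.1 * rng.standard_normal(100) ** 2    # toy "observed"

s = cvx.Variable(nonneg=True)
objective = cvx.Minimize(cvx.sum_squares(s * pred_var - sq_err))
problem = cvx.Problem(objective)
problem.solve()
print("scalar covariance adjustment:", s.value)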
import argparse
import os, glob
import sys

sys.path.insert(0, os.path.join(os.environ['XIVO_ROOT'], 'lib'))
import pyxivo
import savers

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from utils import get_stock_parser
from constants import COSYVIO_COLLECTION_TIME


parser = get_stock_parser("Main Python interface for running XIVO.")
parser.add_argument(
    '-cfg',
    default='cfg/tumvi_cam0.json',
    help='path to the estimator configuration')
parser.add_argument(
    '-use_viewer',
    default=False,
    action='store_true',
    help='visualize trajectory and feature tracks if set')
parser.add_argument(
    '-mode',
    default='eval',
    help='[eval|dump|dumpCov|runOnly] mode to handle the state estimates. '
         'eval: save states for evaluation; dump: save to json file for further processing')
parser.add_argument(
    '-save_full_cov',
    default=False,
    action='store_true',
    help='save the entire covariance matrix, not just that of the motion state, if set')
parser.add_argument('-collection_time', default=COSYVIO_COLLECTION_TIME)


def main(args):
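# --- Illustrative sketch, not part of the original script -------------------
# parse_args accepts an explicit argument list, which is a convenient way to
# exercise the flags above without touching sys.argv. This assumes the extra
# dataset flags added by get_stock_parser all have defaults.
example_args = parser.parse_args(
    ['-cfg', 'cfg/tumvi_cam0.json', '-mode', 'dumpCov', '-save_full_cov'])
# main(example_args) would then run XIVO in dumpCov mode.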
import os
import sys
import argparse

import numpy as np
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial.transform import Rotation
from scipy.special import gamma

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from interpolate_gt import groundtruth_interpolator
from utils import from_upper_triangular_list, cleanup_and_load_json, \
    get_stock_parser, get_xivo_output_filename, get_xivo_gt_filename


parser = get_stock_parser("Plot point cloud of estimated group locations")
parser.add_argument(
    '-plot_deviations',
    default=True,
    type=bool,
    help='whether to plot deviations from the sample mean or the exact positions')
parser.add_argument(
    '-max_suggestion',
    default=25,
    type=int,
    help='how many feature IDs to suggest for plotting')


class GroupDetections:
    def __init__(self, estimator_results, seq):
        self.seq = seq

        # store data from files
        self.estimator_results = cleanup_and_load_json(estimator_results)
        self.n_timestaps = len(self.estimator_results["data"])
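# --- Illustrative sketch, not part of the original script -------------------
# Note that argparse's type=bool does not parse strings: bool("False") is
# True, so "-plot_deviations False" would still enable deviations. A common
# workaround (str2bool is a hypothetical helper, not part of utils) is:
def str2bool(value):
    """Map typical truthy/falsy strings to a real boolean."""
    return str(value).lower() in ("yes", "true", "t", "1")

# parser.add_argument('-plot_deviations', default=True, type=str2bool, ...)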
import os, sys

import numpy as np
import scipy

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from utils import get_stock_parser, get_xivo_gt_filename, \
    get_xivo_output_filename
from eval_cov_calibration import CovarianceCalibration


parser = get_stock_parser("Prints mean state and state errors across the" +
                          " whole TUM VI dataset.")
args = parser.parse_args()

# Data to collect
translation_errors = np.zeros((0, 3))
rotation_errors = np.zeros((0, 3))
velocity_errors = np.zeros((0, 3))
translations = np.zeros((0, 3))
rotations = np.zeros((0, 3))
velocities = np.zeros((0, 3))

for cam_id in [0, 1]:
    for seq in ["room1", "room2", "room3", "room4", "room5", "room6"]:
        print("Cam {}, {}".format(cam_id, seq))
        estimator_data = get_xivo_output_filename(args.dump, "tumvi", seq,
                                                  cam_id=cam_id)
        gt_data = get_xivo_gt_filename(args.dump, "tumvi", seq)
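# --- Illustrative sketch, not part of the original script -------------------
# The (0, 3) arrays above are grown sequence by sequence and then summarized.
# With a hypothetical per-sequence error array seq_err of shape (N, 3), the
# accumulation and summary pattern looks like this:
import numpy as np

translation_errors_demo = np.zeros((0, 3))
seq_err = np.random.default_rng(2).normal(size=(100, 3))     # toy data

translation_errors_demo = np.vstack((translation_errors_demo, seq_err))
mean_err = np.mean(translation_errors_demo, axis=0)
rmse = np.sqrt(np.mean(np.sum(translation_errors_demo ** 2, axis=1)))
print("mean translation error:", mean_err, " RMSE:", rmse)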
import sys
import os
import argparse

import numpy as np
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from interpolate_gt import groundtruth_interpolator
from utils import from_upper_triangular_list, cleanup_and_load_json, \
    get_stock_parser, get_xivo_output_filename, get_xivo_gt_filename


parser = get_stock_parser("Plot point cloud of features")
parser.add_argument(
    '-plot_deviations',
    default=True,
    type=bool,
    help='whether to plot deviations from the sample mean or the exact positions')
parser.add_argument(
    '-max_suggestion',
    default=25,
    type=int,
    help='how many feature IDs to suggest for plotting')


class FeatureDetections:
    def __init__(self, estimator_results, seq):
        self.seq = seq

        # store data from files
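# --- Illustrative sketch, not part of the original script -------------------
# A minimal 3D scatter of point deviations from their sample mean, the kind of
# plot this script produces. Toy points only; the real script reads feature
# positions from the estimator output.
import numpy as np
import matplotlib.pyplot as plt

pts = np.random.default_rng(3).normal(size=(200, 3))        # toy point cloud
dev = pts - pts.mean(axis=0)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(dev[:, 0], dev[:, 1], dev[:, 2], s=4)
ax.set_xlabel("x [m]")
ax.set_ylabel("y [m]")
ax.set_zlabel("z [m]")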
import os, sys
import argparse

import numpy as np
import matplotlib.pyplot as plt

sys.path.append(os.path.join(os.getcwd(), "pyutils"))
from utils import calc_avg_sampling_freq, get_stock_parser


parser = get_stock_parser("Plots timeseries and FFTs of IMU data")


class IMUData:
    def __init__(self, imu_data_file):
        self.data_filepath = imu_data_file
        self.timestamps = []
        self.data = []

        self.read_data()
        self.sampling_freq = calc_avg_sampling_freq(self.timestamps)

    def read_data(self):
        with open(self.data_filepath, 'r') as fid:
            for line in fid.readlines():
                # skip comment lines such as the header
                if line[0] == '#':
                    continue
                larr = line.split(',')
                self.timestamps.append(float(larr[0]))
                self.data.append([
                    float(larr[1]),
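# --- Illustrative sketch, not part of the original script -------------------
# Computing the single-sided FFT of one IMU axis given a sampling frequency
# like the one calc_avg_sampling_freq returns. The signal here is a synthetic
# 5 Hz sine plus noise standing in for, e.g., a gyro axis.
import numpy as np
import matplotlib.pyplot as plt

fs = 200.0                                   # toy sampling rate [Hz]
t = np.arange(0.0, 10.0, 1.0 / fs)
signal = np.sin(2 * np.pi * 5.0 * t) \
    + 0.1 * np.random.default_rng(4).normal(size=t.size)

spectrum = np.abs(np.fft.rfft(signal))
freqs = np.fft.rfftfreq(signal.size, d=1.0 / fs)

plt.figure()
plt.plot(freqs, spectrum)
plt.xlabel("frequency [Hz]")
plt.ylabel("|FFT|")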