def main():
    """Entry point: parse CLI args, prepare the output dir and logging, then train and save."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--config_file', type=str, required=True)
    cli_args = arg_parser.parse_args()

    # Settings: derive the output directory from the config file's stem.
    cfg_path = Path(cli_args.config_file)
    cfg = Config.load(cfg_path)
    out_dir = Path(f'./output/{cfg_path.stem}/')
    out_dir.mkdir(parents=True, exist_ok=True)
    set_seed(cfg.seed)

    log = getLogger(__name__)
    setup_logger(log, out_dir / 'log.txt')

    # Load data, train, then persist artifacts.
    bundle = build_data_module(cfg)
    trainer = build_runner(cfg)
    trainer.run(bundle)
    trainer.save(out_dir)
def main():
    """Configure timestamped file logging, then launch the OOD data experiment."""
    cli = utils.parse_args()
    # The log file name is the current timestamp, e.g. 20240101_120000.log.
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    utils.setup_logger(
        log_path=Path.cwd() / cli.log_dir / Path("{}.log".format(stamp)),
        log_level=cli.log_level,
    )
    ood_data_experiment()
def main():
    """Set up logging for the damped PLF arctan experiment and seed the RNG."""
    experiment_name = "damped_plf_arctan"
    logger = logging.getLogger(__name__)
    setup_logger(f"logs/{experiment_name}.log", logging.INFO)
    logger.info(f"Running experiment: {experiment_name}")
    # Fixed seed for reproducibility.
    np.random.seed(2)
def main():
    """Set up logging, train the Dirichlet-distilled network, then run the
    corrupted-data prediction pass."""
    cli_args = utils.parse_args()
    # Timestamped log file, e.g. 20240101_120000.log.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    utils.setup_logger(
        log_path=Path.cwd() / cli_args.log_dir / Path("{}.log".format(timestamp)),
        log_level=cli_args.log_level,
    )
    LOGGER.info("Args: {}".format(cli_args))
    train_distilled_network_dirichlet()
    predictions_corrupted_data_dirichlet()
def main():
    """Run the linear-Gaussian (affine) smoothing experiment and plot KF vs RTSS."""
    log = logging.getLogger(__name__)
    experiment_name = "affine_problem"
    setup_logger(f"logs/{experiment_name}.log", logging.INFO)
    log.info(f"Running experiment: {experiment_name}")

    num_steps = 20  # trajectory length K
    m_0 = np.array([1, 1, 3, 2])
    P_0 = 1 * np.eye(4)

    # Constant-velocity motion model with sampling period T; process noise
    # only on the velocity components.
    T = 1
    trans_mat = np.array([
        [1, 0, T, 0],
        [0, 1, 0, T],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])
    offset = 0 * np.ones((4, ))
    proc_noise = np.array([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 1.5, 0],
        [0, 0, 0, 1.5],
    ])
    motion_model = AffineModel(trans_mat, offset, proc_noise)

    # Position-only linear measurement model.
    meas_mat = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
    meas_offset = np.zeros((meas_mat @ m_0).shape)
    meas_noise = 2 * np.eye(2)
    meas_model = AffineModel(meas_mat, meas_offset, meas_noise)

    true_x = sim_affine_state_seq(m_0, P_0, motion_model, num_steps)
    y = sim_affine_meas_seq(true_x, meas_model)

    smoother = RtsSmoother(motion_model, meas_model)
    mf, Pf, ms, Ps, _ = smoother.filter_and_smooth(y, m_0, P_0, None)
    vis.plot_nees_and_2d_est(true_x,
                             y, [(mf, Pf, "KF"), (ms, Ps, "RTSS")],
                             sigma_level=3,
                             skip_cov=2)
def setup_logging():
    """Route log output into a Streamlit placeholder via a custom handler."""
    slot = st.empty()
    setup_logger(None, handler=StreamlitHandler(slot))
import os
import joblib
from threading import Lock
import json
import math
import time
from tensorflow.keras.callbacks import Callback
# My files
import config as Config
from src.utils import setup_logger, attach_logger_to_stdout

# Common workaround for duplicate OpenMP runtimes when TF is installed
# alongside other native libs.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Guards concurrent access to the "last model number" bookkeeping.
last_model_number_lock = Lock()
train_logger = setup_logger('train_logger', './logs/train.log')


class MyCustomCallback(Callback):
    # Keras callback that writes epoch boundaries and losses to the train log.
    global train_logger

    def on_epoch_begin(self, epoch, logs=None):
        train_logger.info(f'Starting epoch {epoch}')

    def on_epoch_end(self, epoch, logs=None):
        # Logs both training and validation loss after every epoch.
        train_logger.info(
            f"Train loss: {logs['loss']}, test loss: {logs['val_loss']}")


def get_last_model_number():
    # NOTE(review): this function appears truncated in the current view;
    # it resolves the saved-models directory before (presumably) scanning it.
    path = os.path.join(os.getcwd(), Config.models_directory_path)
import logging

from pymongo import MongoClient
from flask_restful import Resource, reqparse

from src import log_file, log_level, address_mongo, port_mongo
from src.utils import setup_logger

USER_LOGGER = setup_logger(logging.getLogger("USER"), log_level, log_file)


class User(Resource):
    """REST resource validating a username/password pair against MongoDB."""

    def post(self):
        """Check posted credentials against the `ecg.user` collection.

        Returns (body, status): 200 on a match, 401 on a wrong password,
        500 when the user does not exist (NOTE(review): 404 would be the
        conventional status, kept at 500 for caller compatibility).
        """
        parser = reqparse.RequestParser()
        parser.add_argument("username", type=str)
        parser.add_argument("password", type=str)
        body_args = parser.parse_args()
        USER_LOGGER.debug("Username: {}".format(body_args.username))

        mongodb_client = MongoClient(address_mongo, port_mongo)
        db = mongodb_client.ecg
        collection = db.user
        res = collection.find_one({"username": body_args.username})
        # Bug fix: this debug call was previously placed *after* the return
        # statements and therefore never executed; log the lookup result
        # before branching instead.
        USER_LOGGER.debug("Result: {}".format(res))
        if res is None:
            return {"message": "User not existed"}, 500
        # NOTE(review): passwords are compared in plain text; store and
        # compare salted hashes instead.
        if body_args.password == res["password"]:
            return {"message": "OK"}, 200
        else:
            return {"message": "Wrong password"}, 401
import threading
import time
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import Qt
# My files
from src.webcam_capturer import get_webcam_image
from src.utils import setup_logger
import config as Config
# ui
from src.ui.eye_contour import EyeContour
from src.ui.eye_widget import EyeWidget
from src.ui.ui_utils import get_qimage_from_cv2, build_button
from src.ui.base_gui import BaseGUI

# Module-level logger for the data-collection GUI.
dc_logger = setup_logger('dc_logger', './logs/data_collector.log')


class DataCollectorGUI(BaseGUI):
    # Window used while collecting webcam/eye training data.

    def __init__(self, controller):
        super().__init__(controller)
        self.eye_widget = EyeWidget()
        # Presumably assigned later by the controller — None until then.
        self.face_detector = None

    def create_window(self):
        # Central widget is the webcam frame; eye contours are drawn on top
        # of it as child widgets.
        self.setWindowTitle('Data Collector')
        self.webcam_image_widget = QtWidgets.QLabel()
        self.left_eye_contour = EyeContour(self.webcam_image_widget)
        self.right_eye_contour = EyeContour(self.webcam_image_widget)
        self.setCentralWidget(self.webcam_image_widget)
    # Persist the training arguments next to the model weights.
    FILE_NAME = '/model_' + args.model_name + '_training_args.txt'
    with open(args.save_w_dir + FILE_NAME, 'w') as f:
        json.dump(dict_to_save, f, indent=2)
    return


if __name__ == "__main__":
    # parse arguments and setup logger
    parser = setup_args_parser()
    args_temp = parser.parse_args()
    args = parse_args_further(args_temp)
    logger = u.setup_logger(args.debug)
    log_args(args)
    # for reproducibility
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if str(args.device) in ['cuda', 'cuda:0', 'cuda:1']:
        torch.cuda.manual_seed(args.seed)
    # create directories for saving
    # NOTE(review): the conditional-expression-for-side-effect form below is
    # equivalent to os.makedirs(..., exist_ok=True); this chunk also appears
    # truncated in the current view.
    if not args.debug:
        os.makedirs(
            args.save_h_dir) if not os.path.exists(args.save_h_dir) else None
        os.makedirs(
            args.save_w_dir) if not os.path.exists(args.save_w_dir) else None
def main():
    """IPLS experiment: compare IEKS/LM-IEKS and IPLS/LM-IPLS smoothers on a
    non-stationary growth model with a cubic or quadratic measurement model."""
    np.random.seed(1)
    args = parse_args()
    log = logging.getLogger(__name__)
    experiment_name = "ipls"
    setup_logger(f"logs/{experiment_name}.log", logging.DEBUG)
    log.info(f"Running experiment: {experiment_name}")
    K = 50  # number of time steps
    D_x = 1  # state dimension
    motion_model = NonStationaryGrowth(alpha=0.9,
                                       beta=10,
                                       gamma=8,
                                       delta=1.2,
                                       proc_noise=1)
    # Measurement model is selected by the CLI flag.
    meas_model = (Cubic(coeff=1 / 20, meas_noise=1)
                  if args.meas_type == MeasType.Cubic else
                  Quadratic(coeff=1 / 20, meas_noise=1))
    # LM hyper params
    lambda_ = 1e-2
    nu = 10
    prior_mean = np.atleast_1d(5)
    prior_cov = np.atleast_2d([4])
    # MC DATA
    # num_mc_runs = 1000  # 1000 in original exp
    # num_mc_per_traj = 50
    # num_trajs = num_mc_runs // num_mc_per_traj
    # trajs, noise, _, _ = get_specific_states_from_file(Path.cwd() / "data/ipls_paper")
    # assert trajs.shape == (K, num_trajs)
    # assert noise.shape == (K, num_mc_runs)
    # meas = gen_measurements(traj, noise[:, 0], meas_model)
    states, meas = simulate_data(K, prior_mean, prior_cov, motion_model,
                                 meas_model)
    # The paper simply states that "[t]he SLRs have been implemented using the unscented transform
    # with N = 2 D_x + 1 sigma-points and the weight of the sigma-point located on the mean is 1/3."
    # The following settings ensures that:
    # a) w_0 (the weight of the mean sigma point) is 1/3
    # b) there is no difference for the weights for the mean and cov estimation
    sigma_point_method = UnscentedTransform(1, 0, 1 / 2)
    assert sigma_point_method.weights(D_x)[0][0] == 1 / 3
    assert np.allclose(
        sigma_point_method.weights(D_x)[0],
        sigma_point_method.weights(D_x)[1])
    # traj_idx = _mc_iter_to_traj_idx(0, num_mc_per_traj)
    # traj = trajs[:, traj_idx].reshape((K, 1))
    # traj = traj[:min_K, :]
    results = []
    # Analytical (EKS) smoothing cost used by the IEKS variants.
    cost_fn_eks = partial(
        analytical_smoothing_cost_time_dep,
        measurements=meas,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    ieks = Ieks(motion_model, meas_model, args.num_iter)
    ms_ieks, Ps_ieks, cost_ieks, rmses_ieks, neeses_ieks = ieks.filter_and_smooth(
        meas,
        prior_mean,
        prior_cov,
        cost_fn_eks,
    )
    results.append((ms_ieks, Ps_ieks, "IEKS"), )
    lm_ieks = LmIeks(motion_model,
                     meas_model,
                     args.num_iter,
                     10,
                     lambda_=lambda_,
                     nu=nu)
    ms_lm_ieks, Ps_lm_ieks, cost_lm_ieks, rmses_lm_ieks, neeses_lm_ieks = lm_ieks.filter_and_smooth(
        meas,
        prior_mean,
        prior_cov,
        cost_fn_eks,
    )
    results.append((ms_lm_ieks, Ps_lm_ieks, "LM-IEKS"), )
    # SLR-based smoothing cost used by the IPLS variants.
    cost_fn_ipls = partial(
        slr_smoothing_cost_pre_comp,
        measurements=meas,
        m_1_0=prior_mean,
        P_1_0_inv=np.linalg.inv(prior_cov),
    )
    ipls = SigmaPointIpls(motion_model, meas_model, sigma_point_method,
                          args.num_iter)
    _, _, ipls_ms, ipls_Ps, _ = ipls.filter_and_smooth(meas,
                                                       prior_mean,
                                                       prior_cov,
                                                       cost_fn=cost_fn_ipls)
    results.append((ipls_ms, ipls_Ps, "IPLS"))
    lm_ipls = SigmaPointLmIpls(motion_model,
                               meas_model,
                               sigma_point_method,
                               args.num_iter,
                               cost_improv_iter_lim=10,
                               lambda_=lambda_,
                               nu=nu)
    _, _, lm_ipls_ms, lm_ipls_Ps, _ = lm_ipls.filter_and_smooth(
        meas, prior_mean, prior_cov, cost_fn=cost_fn_ipls)
    results.append((lm_ipls_ms, lm_ipls_Ps, "LM-IPLS"))
    # tikz_results(states, meas, results)
    plot_results(states, meas, results)
        info_file=config.TRN_INFO_FILE,
        graph_file=config.TRN_GRAPH_FILE,
        hist_file=config.TRN_HIST_FILE,
        ft_hist_file=config.TRN_FT_HIST_FILE,
        input_size=config.TRN_INPUT_SIZE,
        dense_dims=config.TRN_DENSE_DIMS,
        lr=config.TRN_LR,
        ft_lr=config.TRN_FT_LR,
        min_lr=config.TRN_MIN_LR,
        min_ft_lr=config.TRN_MIN_FT_LR,
        batch_size=config.TRN_BATCH_SIZE,
        reuse_count=config.TRN_REUSE_CNT,
        epochs=config.TRN_EPOCHS,
        valid_rate=config.TRN_VALID_RATE,
        es_patience=config.TRN_ES_PATIENCE,
        lr_patience=config.TRN_LR_PATIENCE,
        ft_start=config.TRN_FT_START
    )
    maker.execute()


if __name__ == '__main__':
    NOW = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    # TODO: configure the logger's output destination
    # logger = setup_logger('./logs/train_{0}.log'.format(NOW))
    logger = setup_logger()
    logger.info('Training step starts')
    # TODO: write the ModelMaker arguments to the log file
    train()
def main():
    """Coordinated-turn smoothing experiment on a single realisation,
    running LS-IEKS and exporting trajectories for tikz plotting."""
    args = parse_args()
    log = logging.getLogger(__name__)
    experiment_name = "ct_experiment_realisation"
    setup_logger(f"logs/{experiment_name}.log", logging.DEBUG)
    log.info(f"Running experiment: {experiment_name}")
    if not args.random:
        np.random.seed(2)
    # Coordinated-turn process noise, discretised with step dt (qc drives
    # position/velocity, qw the turn rate).
    dt = 0.01
    qc = 0.01
    qw = 10
    Q = np.array([
        [qc * dt**3 / 3, 0, qc * dt**2 / 2, 0, 0],
        [0, qc * dt**3 / 3, 0, qc * dt**2 / 2, 0],
        [qc * dt**2 / 2, 0, qc * dt, 0, 0],
        [0, qc * dt**2 / 2, 0, qc * dt, 0],
        [0, 0, 0, 0, dt * qw],
    ])
    motion_model = CoordTurn(dt, Q)
    # Two fixed sensors observing the target.
    sens_pos_1 = np.array([-1.5, 0.5])
    sens_pos_2 = np.array([1, 1])
    sensors = np.row_stack((sens_pos_1, sens_pos_2))
    std = 0.5
    R = std**2 * np.eye(2)
    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])
    lambda_ = 1e-0
    num_iter = args.num_iter
    # Pick the measurement model and the matching columns of the data file.
    if args.meas_type == MeasType.Range:
        meas_model = MultiSensorRange(sensors, R)
        meas_cols = np.array([0, 1])
    elif args.meas_type == MeasType.Bearings:
        meas_model = MultiSensorBearings(sensors, R)
        meas_cols = np.array([2, 3])
    log.info("Generating states and measurements.")
    if args.random:
        states, measurements = simulate_data(motion_model,
                                             meas_model,
                                             prior_mean[:-1],
                                             time_steps=500)
    else:
        # Reproduce the exact data used in the LM-IEKS paper.
        states, all_meas, _, xs_ss = get_specific_states_from_file(
            Path.cwd() / "data/lm_ieks_paper", Type.LM, num_iter)
        measurements = all_meas[:, meas_cols]
    if args.var_sensors:
        # Periodically replace two-sensor measurements with a single
        # high-accuracy bearings-only sensor.
        single_sensor = sens_pos_2.reshape((1, 2))
        std = 0.001
        R_certain = std**2 * np.eye(1)
        single_meas_model = MultiSensorBearings(single_sensor, R_certain)
        # Create a set of time steps where the original two sensor measurements are replaced with single ones.
        # single_meas_time_steps = set(list(range(0, 100, 5))[1:])
        single_meas_time_steps = set(list(range(0, 500, 50))[1:])
        meas_model = BearingsVaryingSensors(meas_model, single_meas_model,
                                            single_meas_time_steps)
        # Change measurments so that some come from the alternative model.
        measurements = modify_meas(measurements, states, meas_model, True)
    results = []
    cost_fn_eks = partial(
        analytical_smoothing_cost_time_dep,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    D_x = prior_mean.shape[0]
    K = len(measurements)
    # Zero-mean initial trajectory with the prior covariance at every step.
    init_traj = (np.zeros((K, D_x)), np.array(K * [prior_cov]))
    # (Removed: commented-out IEKS and LM-IEKS runs.)
    log.info("Running LS-IEKS...")
    # Directional derivative of the smoothing cost, needed by Armijo search.
    dir_der_eks = partial(
        dir_der_analytical_smoothing_cost,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    ms_ls_ieks, Ps_ls_ieks, cost_ls_ieks, rmses_ls_ieks, neeses_ls_ieks = run_smoothing(
        LsIeks(
            motion_model,
            meas_model,
            num_iter,
            ArmijoLineSearch(cost_fn_eks, dir_der_eks, c_1=0.1),
        ),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_eks,
        init_traj,
    )
    results.append((ms_ls_ieks, Ps_ls_ieks, cost_ls_ieks[1:], "LS-IEKS"), )
    sigma_point_method = SphericalCubature()
    # (Removed: commented-out IPLS, LM-IPLS and LS-IPLS runs.)
    # Export each estimated 2D trajectory for tikz, then plot.
    for ms, _, _, label in results:
        tikz_2d_traj(Path.cwd() / "tikz", ms[:, :2], label)
    plot_results(
        states,
        results,
        None,
    )
def main():
    """Coordinated-turn range/bearing experiment smoothed with sigma-point IPLS."""
    log = logging.getLogger(__name__)
    experiment_name = "coord_turn"
    setup_logger(f"logs/{experiment_name}.log", logging.INFO)
    log.info(f"Running experiment: {experiment_name}")
    np.random.seed(2)
    range_ = (0, -1)
    num_iter = 5
    # Motion model
    sampling_period = 0.1
    v_scale = 2
    omega_scale = 2
    sigma_v = v_scale * 1
    sigma_omega = omega_scale * np.pi / 180
    # Process noise only on speed and turn-rate components.
    Q = np.diag([
        0, 0, sampling_period * sigma_v**2, 0,
        sampling_period * sigma_omega**2
    ])
    motion_model = CoordTurn(sampling_period, Q)
    # Meas model
    pos = np.array([100, -100])
    sigma_r = 2
    sigma_phi = 0.5 * np.pi / 180
    R = np.diag([sigma_r**2, sigma_phi**2])
    meas_model = RangeBearing(pos, R)
    # Generate data
    true_states, measurements = get_tricky_data(meas_model, R, range_)
    obs_dims = true_states.shape[1]
    # Convert range/bearing measurements to Cartesian coords for plotting.
    cartes_meas = np.apply_along_axis(partial(to_cartesian_coords, pos=pos), 1,
                                      measurements)
    # Prior distr.
    prior_mean = np.array([4.4, 0, 4, 0, 0])
    prior_cov = np.diag(
        [1**2, 1**2, 1**2, (5 * np.pi / 180)**2, (1 * np.pi / 180)**2])
    # SLR-based smoothing cost for the IPLS iterations.
    cost_fn_ipls = partial(
        slr_smoothing_cost,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
        slr=SigmaPointSlr(SphericalCubature()),
    )
    smoother = SigmaPointIpls(motion_model, meas_model, SphericalCubature(),
                              num_iter)
    mf, Pf, ms, Ps, _ = smoother.filter_and_smooth(measurements, prior_mean,
                                                   prior_cov, cost_fn_ipls)
    # Plot NEES and 2D estimates restricted to the observable dimensions.
    vis.plot_nees_and_2d_est(
        true_states[range_[0]:range_[1], :],
        cartes_meas,
        [
            (mf[:, :obs_dims], Pf[:, :obs_dims, :obs_dims], "filter"),
            (ms[:, :obs_dims], Ps[:, :obs_dims, :obs_dims], "smoother"),
        ],
        sigma_level=3,
        skip_cov=5,
    )
def main():
    """Tunnel-simulation grid search over CoordTurn process-noise scales,
    plotting the resulting RMSE surface.

    NOTE(review): this function has latent runtime bugs flagged inline below;
    a behavior-preserving rewrite was not attempted because the variables the
    final `plot_metrics` call needs are produced by the commented-out runs.
    """
    log = logging.getLogger(__name__)
    experiment_name = "tunnel_simulation"
    setup_logger(f"logs/{experiment_name}.log", logging.WARNING)
    log.info(f"Running experiment: {experiment_name}")
    np.random.seed(2)
    num_iter = 3
    # Meas model
    pos = np.array([100, -100])
    # sigma_r = 2
    # sigma_phi = 0.5 * np.pi / 180
    sigma_r = 4
    sigma_phi = 1 * np.pi / 180
    R = np.diag([sigma_r**2, sigma_phi**2])
    meas_model = RangeBearing(pos, R)
    # Generate data
    range_ = (0, None)
    tunnel_segment = [145, 165]
    # tunnel_segment = [None, None]
    states, measurements = get_states_and_meas(meas_model, R, range_,
                                               tunnel_segment)
    cartes_meas = np.apply_along_axis(partial(to_cartesian_coords, pos=pos), 1,
                                      measurements)
    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])
    results = []
    sigma_point_method = SphericalCubature()
    # (Removed: commented-out cost_fn_ipls construction.)
    # Grid of velocity / turn-rate noise scales to sweep.
    vs = np.array([3, 4, 5, 6, 7])
    # NOTE(review): the name `os` shadows the stdlib `os` module if imported.
    os = np.array([15, 17.5, 20, 22.5, 25])
    rmses = np.empty((vs.shape[0], os.shape[0]))
    sampling_period = 0.1
    eps = 0.1
    # v_scale = 2
    # omega_scale = 2
    for v_iter, v_scale in enumerate(vs):
        for o_iter, omega_scale in enumerate(os):
            # Motion model
            sigma_v = v_scale * 1
            sigma_omega = omega_scale * np.pi / 180
            Q = np.diag([
                eps, eps, sampling_period * sigma_v**2, eps,
                sampling_period * sigma_omega**2
            ])
            motion_model = CoordTurn(sampling_period, Q)
            cost_fn_eks = partial(
                analytical_smoothing_cost,
                meas=measurements,
                m_1_0=prior_mean,
                P_1_0=prior_cov,
                motion_model=motion_model,
                meas_model=meas_model,
            )
            ms_gn_ieks, Ps_gn_ieks, cost_gn_ieks, tmp_rmse, tmp_nees = run_smoothing(
                Ieks(motion_model, meas_model, num_iter), states, measurements,
                prior_mean, prior_cov, cost_fn_eks)
            tmp = rmse(ms_gn_ieks[:, :2], states)
            print(v_scale, omega_scale, tmp)
            rmses[v_iter, o_iter] = tmp
    fig = plt.figure()
    # NOTE(review): `gca(projection=...)` was removed in matplotlib >= 3.6;
    # `fig.add_subplot(projection="3d")` is the replacement.
    ax = fig.gca(projection="3d")
    X, Y = np.meshgrid(vs, os)
    surf = ax.plot_surface(X, Y, rmses, linewidth=0, antialiased=False)
    from matplotlib.ticker import LinearLocator, FormatStrFormatter
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter("%.02f"))
    ax.set_xlabel("v")
    ax.set_ylabel("o")
    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
    results.append((ms_gn_ieks, Ps_gn_ieks, cost_gn_ieks[1:], "GN-IEKS"))
    # (Removed: commented-out GN-IPLS run.)
    plot_results(
        states,
        results,
        cartes_meas,
    )
    # NOTE(review): the *_lm_ieks, *_gn_ipls and *_lm_ipls series referenced
    # below are never defined in this function (their runs are commented out),
    # and run_smoothing's rmse/nees outputs were bound to tmp_rmse/tmp_nees —
    # so this call will raise NameError at runtime.
    plot_metrics(
        [
            (cost_gn_ieks[1:], "GN-IEKS"),
            (cost_lm_ieks[1:], "LM-IEKS"),
            (cost_gn_ipls[1:], "GN-IPLS"),
            (cost_lm_ipls[1:], "LM-IPLS"),
        ],
        [
            (rmses_gn_ieks, "GN-IEKS"),
            (rmses_lm_ieks, "LM-IEKS"),
            (rmses_gn_ipls, "LM-IPLS"),
            (rmses_lm_ipls, "LM-IPLS"),
        ],
        [
            (neeses_gn_ieks, "GN-IEKS"),
            (neeses_lm_ieks, "LM-IEKS"),
            (neeses_gn_ipls, "LM-IPLS"),
            (neeses_lm_ipls, "LM-IPLS"),
        ],
    )
def train_distilled_network_dirichlet(
        model_dir="models/distilled_model_cifar10_dirichlet"):
    """Distill a CIFAR-10 ensemble into a single ResNet with distribution
    distillation (Dirichlet output), evaluate test accuracy, and save the
    resulting state dict to `model_dir`.
    """
    args = utils.parse_args()
    log_file = Path("{}.log".format(datetime.now().strftime('%Y%m%d_%H%M%S')))
    utils.setup_logger(log_path=Path.cwd() / args.log_dir / log_file,
                       log_level=args.log_level)
    # Fixed 40k/10k train/validation split over the pre-shuffled index file.
    data_ind = np.load(
        "src/experiments/cifar10/training_files/training_data_indices.npy")
    num_train_points = 40000
    train_ind = data_ind[:num_train_points]
    valid_ind = data_ind[num_train_points:]
    train_data = cifar10_ensemble_pred.Cifar10Data(ind=train_ind,
                                                   augmentation=True)
    valid_data = cifar10_ensemble_pred.Cifar10Data(ind=valid_ind)
    train_loader = torch.utils.data.DataLoader(train_data.set,
                                               batch_size=100,
                                               shuffle=True,
                                               num_workers=0)
    valid_loader = torch.utils.data.DataLoader(valid_data.set,
                                               batch_size=100,
                                               shuffle=True,
                                               num_workers=0)
    test_data = cifar10_ensemble_pred.Cifar10Data(train=False)
    test_loader = torch.utils.data.DataLoader(test_data.set,
                                              batch_size=64,
                                              shuffle=True,
                                              num_workers=0)
    ensemble_size = 10
    # Note that the ensemble predictions are assumed to have been saved to file (see ensemble_predictions.py),
    # ensemble_indices.npy contains the order of the ensemble members such that ind[:ensemble_size] are the indices
    # of the first ensemble, ind[ensemble_size:2*ensemble_size] are the indices of the second ensemble and so on
    ind = np.load("src/experiments/cifar10/training_files/ensemble_indices.npy"
                  )[((args.rep - 1) * ensemble_size):(args.rep *
                                                      ensemble_size)]
    ensemble = ensemble_wrapper.EnsembleWrapper(output_size=10, indices=ind)
    device = utils.torch_settings(args.seed, args.gpu)
    distilled_model = cifar_resnet_dirichlet.CifarResnetDirichlet(
        ensemble,
        resnet_utils.BasicBlock, [3, 2, 2, 2],
        device=device,
        learning_rate=args.lr)
    loss_metric = metrics.Metric(name="Mean loss",
                                 function=distilled_model.calculate_loss)
    distilled_model.add_metric(loss_metric)
    distilled_model.train(train_loader,
                          num_epochs=args.num_epochs,
                          validation_loader=valid_loader)
    distilled_model.eval_mode()
    # Evaluate test accuracy of the distilled model.
    predicted_distribution = []
    all_labels = []
    for batch in test_loader:
        inputs, labels = batch
        # inputs[0] is the image tensor; remaining entries hold ensemble preds.
        inputs, labels = inputs[0].to(distilled_model.device), labels.to(
            distilled_model.device)
        predicted_distribution.append(
            distilled_model.predict(inputs).to(distilled_model.device))
        all_labels.append(labels.long())
    test_acc = metrics.accuracy(torch.cat(predicted_distribution),
                                torch.cat(all_labels))
    LOGGER.info("Test accuracy is {}".format(test_acc))
    torch.save(distilled_model.state_dict(), model_dir)
def main():
    """Benchmark per-iteration wall-clock time of the IEKS/IPLS smoother
    variants on the LM-IEKS paper data; prints times relative to plain IEKS."""
    log = logging.getLogger(__name__)
    experiment_name = "lm_ieks"
    setup_logger(f"logs/{experiment_name}.log", logging.WARNING)
    log.info(f"Running experiment: {experiment_name}")
    # Coordinated-turn motion model from the LM-IEKS paper.
    dt = 0.01
    qc = 0.01
    qw = 10
    Q = np.array([
        [qc * dt**3 / 3, 0, qc * dt**2 / 2, 0, 0],
        [0, qc * dt**3 / 3, 0, qc * dt**2 / 2, 0],
        [qc * dt**2 / 2, 0, qc * dt, 0, 0],
        [0, qc * dt**2 / 2, 0, qc * dt, 0],
        [0, 0, 0, 0, dt * qw],
    ])
    motion_model = CoordTurn(dt, Q)
    sens_pos_1 = np.array([-1.5, 0.5])
    sens_pos_2 = np.array([1, 1])
    sensors = np.row_stack((sens_pos_1, sens_pos_2))
    std = 0.5
    R = std**2 * np.eye(2)
    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])
    num_iter = 1
    np.random.seed(0)
    states, all_meas, _, xs_ss = get_specific_states_from_file(
        Path.cwd() / "data/lm_ieks_paper", Type.LM, num_iter)
    K = all_meas.shape[0]
    # Slightly perturbed prior covariances used as initial trajectory covs.
    covs = np.array([prior_cov] * K) * (0.90 + np.random.rand() / 5)
    meas_model = MultiSensorRange(sensors, R)
    measurements = all_meas[:, :2]
    cost_fn_eks = partial(
        analytical_smoothing_cost,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    dir_der_eks = partial(
        dir_der_analytical_smoothing_cost,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    sigma_point_method = SphericalCubature()
    cost_fn_ipls = partial(
        slr_smoothing_cost_pre_comp,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0_inv=np.linalg.inv(prior_cov),
    )
    # Bind each smoother run into a zero-argument callable for timeit.
    time_ieks = partial(
        Ieks(motion_model, meas_model,
             num_iter).filter_and_smooth_with_init_traj,
        measurements,
        prior_mean,
        prior_cov,
        (xs_ss, covs),
        1,
        noop_cost,
    )
    time_lm_ieks = partial(
        LmIeks(motion_model, meas_model, num_iter, 10, 1e-2,
               10).filter_and_smooth_with_init_traj,
        measurements,
        prior_mean,
        prior_cov,
        (xs_ss, covs),
        1,
        cost_fn_eks,
    )
    time_ls_ieks = partial(
        LsIeks(
            motion_model,
            meas_model,
            num_iter,
            ArmijoLineSearch(cost_fn_eks, dir_der_eks, c_1=0.1),
        ).filter_and_smooth_with_init_traj,
        measurements,
        prior_mean,
        prior_cov,
        (xs_ss, covs),
        1,
        cost_fn_eks,
    )
    time_ipls = partial(
        SigmaPointIpls(motion_model, meas_model, sigma_point_method,
                       num_iter).filter_and_smooth_with_init_traj,
        measurements,
        prior_mean,
        prior_cov,
        (xs_ss, covs),
        1,
        slr_noop_cost,
    )
    time_lm_ipls = partial(
        SigmaPointLmIpls(motion_model,
                         meas_model,
                         sigma_point_method,
                         num_iter,
                         cost_improv_iter_lim=10,
                         lambda_=1e-2,
                         nu=10).filter_and_smooth_with_init_traj,
        measurements,
        prior_mean,
        prior_cov,
        (xs_ss, covs),
        1,
        cost_fn_ipls,
    )
    cost_fn_ls_ipls = partial(
        slr_smoothing_cost_means,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0_inv=np.linalg.inv(prior_cov),
        motion_fn=motion_model.map_set,
        meas_fn=meas_model.map_set,
        slr_method=SigmaPointSlr(sigma_point_method),
    )
    time_ls_ipls = partial(
        SigmaPointLsIpls(motion_model, meas_model, sigma_point_method,
                         num_iter, partial(ArmijoLineSearch, c_1=0.1),
                         10).filter_and_smooth_with_init_traj,
        measurements,
        prior_mean,
        prior_cov,
        (xs_ss, covs),
        1,
        cost_fn_ls_ipls,
    )
    # Average wall-clock time per smoother iteration over num_samples runs.
    num_samples = 10
    time_ieks = timeit(time_ieks,
                       number=num_samples) / (num_iter * num_samples)
    time_lm_ieks = timeit(time_lm_ieks,
                          number=num_samples) / (num_iter * num_samples)
    time_ls_ieks = timeit(time_ls_ieks,
                          number=num_samples) / (num_iter * num_samples)
    time_ipls = timeit(time_ipls,
                       number=num_samples) / (num_iter * num_samples)
    time_lm_ipls = timeit(time_lm_ipls,
                          number=num_samples) / (num_iter * num_samples)
    time_ls_ipls = timeit(time_ls_ipls,
                          number=num_samples) / (num_iter * num_samples)
    print(f"IEKS: {time_ieks:.2f} s, 100.0%")
    print(f"LM-IEKS: {time_lm_ieks:.2f} s, {time_lm_ieks/time_ieks*100:.2f}%")
    print(f"LS-IEKS: {time_ls_ieks:.2f} s, {time_ls_ieks/time_ieks*100:.2f}%")
    print(f"IPLS: {time_ipls:.2f} s, {time_ipls/time_ieks*100:.2f}%")
    print(f"LM-IPLS: {time_lm_ipls:.2f} s, {time_lm_ipls/time_ieks*100:.2f}%")
    print(f"LS-IPLS: {time_ls_ipls:.2f} s, {time_ls_ipls/time_ieks*100:.2f}%")
def main():
    """Analyse the 'tricky' coordinated-turn case where LM-IEKS diverges,
    comparing IEKS vs LM-IEKS cost, NEES and RMSE trajectories."""
    args = parse_args()
    log = logging.getLogger(__name__)
    experiment_name = "analyse_tricky_ct"
    setup_logger(f"logs/{experiment_name}.log", logging.DEBUG)
    log.info(f"Running experiment: {experiment_name}")
    # Coordinated-turn motion model from the LM-IEKS paper.
    dt = 0.01
    qc = 0.01
    qw = 10
    Q = np.array([
        [qc * dt**3 / 3, 0, qc * dt**2 / 2, 0, 0],
        [0, qc * dt**3 / 3, 0, qc * dt**2 / 2, 0],
        [qc * dt**2 / 2, 0, qc * dt, 0, 0],
        [0, qc * dt**2 / 2, 0, qc * dt, 0],
        [0, 0, 0, 0, dt * qw],
    ])
    motion_model = CoordTurn(dt, Q)
    sens_pos_1 = np.array([-1.5, 0.5])
    sens_pos_2 = np.array([1, 1])
    sensors = np.row_stack((sens_pos_1, sens_pos_2))
    std = 0.5
    R = std**2 * np.eye(2)
    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])
    lambda_ = 1e-2
    num_iter = args.num_iter
    meas_model = MultiSensorBearings(sensors, R)
    # Pre-recorded states/measurements for the divergent case.
    data_dir = Path.cwd() / "data/lm_ieks_paper/tricky"
    states = np.genfromtxt(data_dir / "lm_div_states.csv", dtype=float)
    measurements = np.genfromtxt(data_dir / "lm_div_meas.csv", dtype=float)
    estimates = []
    costs = []
    neeses = []
    rmses = []
    cost_fn_eks = partial(
        analytical_smoothing_cost,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    ms_ieks, Ps_ieks, cost_ieks, rmses_ieks, neeses_ieks = run_smoothing(
        Ieks(motion_model, meas_model, num_iter),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_eks,
    )
    estimates.append((ms_ieks, Ps_ieks, cost_ieks[1:], "IEKS"), )
    costs.append((cost_ieks, "IEKS"), )
    rmses.append((rmses_ieks, "IEKS"), )
    neeses.append((neeses_ieks, "IEKS"), )
    ms_lm_ieks, Ps_lm_ieks, cost_lm_ieks, rmses_lm_ieks, neeses_lm_ieks = run_smoothing(
        LmIeks(motion_model, meas_model, num_iter, 10, lambda_, 10),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_eks,
    )
    estimates.append((ms_lm_ieks, Ps_lm_ieks, cost_lm_ieks[1:], "LM-IEKS"), )
    costs.append((cost_lm_ieks, "LM-IEKS"), )
    rmses.append((rmses_lm_ieks, "LM-IEKS"), )
    neeses.append((neeses_lm_ieks, "LM-IEKS"), )
    # (Removed: large commented-out LS-IEKS, IPLS, LM-IPLS and LS-IPLS runs.)
    plot_scalar_metric(costs, "Cost")
    plot_scalar_metric(neeses, "NEES")
    plot_scalar_metric(rmses, "RMSE")
    plot_results(
        states,
        estimates,
        None,
    )
import numpy as np
import os
import joblib
import re
import json
from cv2 import imread
import time
from multiprocessing import Pool
# My files
import config as Config
import src.face_detector as face_detector
from src.utils import resize_cv2_image, get_binary_thresholded_image, convert_to_gray_image, setup_logger, attach_logger_to_stdout
from src.data_object import DataObject

# Module-level logger for the data-processing pipeline.
dp_logger = setup_logger('dp_logger', './logs/data_processing.log')

# (Removed: large commented-out draft of a DataCollectionSession class that
# loaded per-session JSON metadata and images.)
#!/usr/bin/env python3
from src.hue import Hue
from src.network import Network
from src.utils import setup_logger


def version():
    """Log the application version read from the VERSION file."""
    import logging
    logger = logging.getLogger("main")
    # Bug fix: the file handle was previously opened inline and never closed;
    # a context manager releases it deterministically.
    with open("VERSION", "r") as version_file:
        logger.info(f'Hue geofencing version {version_file.read()}')


if __name__ == "__main__":
    setup_logger()
    version()
    hue = Hue()
    # Network watches presence and toggles the lights via the Hue callbacks.
    network = Network(callback_leave=hue.set_leave_home,
                      callback_join=hue.set_arrive)
def main():
    """Run the single-realisation tunnel simulation experiment.

    Simulates a coordinated-turn trajectory observed through a range/bearing
    sensor (with a "tunnel" segment where measurements are distorted), then
    runs six iterated smoothers (IEKS, LM-IEKS, LS-IEKS, IPLS, LM-IPLS,
    LS-IPLS) and plots their estimates, costs, RMSE and NEES histories.
    """
    log = logging.getLogger(__name__)
    experiment_name = "tunnel_simulation"
    setup_logger(f"logs/{experiment_name}.log", logging.DEBUG)
    log.info(f"Running experiment: {experiment_name}")
    np.random.seed(2)
    num_iter = 10

    # Motion model: coordinated turn, process noise on speed and turn rate
    # (eps keeps the position/heading diagonal entries non-singular).
    sampling_period = 0.1
    v_scale = 7
    omega_scale = 15
    sigma_v = v_scale * 1
    sigma_omega = omega_scale * np.pi / 180
    eps = 0.1
    Q = np.diag([eps, eps, sampling_period * sigma_v ** 2, eps, sampling_period * sigma_omega ** 2])
    motion_model = CoordTurn(sampling_period, Q)

    # Meas model: range/bearing from a fixed sensor position. The commented
    # values are the baseline noise levels before scaling by noise_factor.
    pos = np.array([100, -100])
    # sigma_r = 2
    # sigma_phi = 0.5 * np.pi / 180
    noise_factor = 4
    sigma_r = 2 * noise_factor
    sigma_phi = noise_factor * 0.5 * np.pi / 180
    R = np.diag([sigma_r ** 2, sigma_phi ** 2])
    meas_model = RangeBearing(pos, R)

    # Generate data; tunnel_segment marks the time span with degraded
    # measurements ([None, None] disables the tunnel).
    range_ = (0, None)
    tunnel_segment = [140, 175]
    # tunnel_segment = [None, None]

    states, measurements = get_states_and_meas(meas_model, R, range_, tunnel_segment)
    # Materialise as a plain list (idiomatic copy instead of a pass-through
    # comprehension).
    measurements = list(measurements)
    cartes_meas = np.apply_along_axis(partial(to_cartesian_coords, pos=pos), 1, measurements)

    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])

    # Levenberg-Marquardt / line-search hyperparameters shared by the
    # regularised smoother variants.
    lambda_ = 1e-2
    nu = 10
    grid_search_points = 10

    results = []
    cost_fn_eks = partial(
        analytical_smoothing_cost,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0=prior_cov,
        motion_model=motion_model,
        meas_model=meas_model,
    )
    sigma_point_method = SphericalCubature()
    cost_fn_ipls = partial(
        slr_smoothing_cost_pre_comp,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0_inv=np.linalg.inv(prior_cov),
    )

    ms_gn_ieks, Ps_gn_ieks, cost_ieks, rmses_ieks, neeses_ieks = run_smoothing(
        Ieks(motion_model, meas_model, num_iter), states, measurements, prior_mean, prior_cov, cost_fn_eks
    )
    # cost[0] is the cost of the initial trajectory; all histories are
    # sliced with [1:] so only per-iteration costs are plotted.
    results.append((ms_gn_ieks, Ps_gn_ieks, cost_ieks[1:], "IEKS"))

    ms_lm_ieks, Ps_lm_ieks, cost_lm_ieks, rmses_lm_ieks, neeses_lm_ieks = run_smoothing(
        LmIeks(motion_model, meas_model, num_iter, cost_improv_iter_lim=10, lambda_=lambda_, nu=nu),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_eks,
    )
    results.append((ms_lm_ieks, Ps_lm_ieks, cost_lm_ieks[1:], "LM-IEKS"))

    ms_ls_ieks, Ps_ls_ieks, cost_ls_ieks, tmp_rmse, tmp_nees = run_smoothing(
        LsIeks(motion_model, meas_model, num_iter, GridSearch(cost_fn_eks, grid_search_points)),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_eks,
    )
    results.append((ms_ls_ieks, Ps_ls_ieks, cost_ls_ieks[1:], "LS-IEKS"))

    ms_gn_ipls, Ps_gn_ipls, cost_ipls, rmses_ipls, neeses_ipls = run_smoothing(
        SigmaPointIpls(motion_model, meas_model, sigma_point_method, num_iter),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_ipls,
    )
    results.append((ms_gn_ipls, Ps_gn_ipls, cost_ipls[1:], "IPLS"))

    ms_lm_ipls, Ps_lm_ipls, cost_lm_ipls, rmses_lm_ipls, neeses_lm_ipls = run_smoothing(
        SigmaPointLmIpls(
            motion_model, meas_model, sigma_point_method, num_iter, cost_improv_iter_lim=10, lambda_=lambda_, nu=nu
        ),
        states,
        measurements,
        prior_mean,
        prior_cov,
        cost_fn_ipls,
    )
    results.append((ms_lm_ipls, Ps_lm_ipls, cost_lm_ipls[1:], "LM-IPLS"))

    # LS-IPLS needs the means-based SLR cost since its line search evaluates
    # candidate mean trajectories directly.
    ls_cost_fn = partial(
        slr_smoothing_cost_means,
        measurements=measurements,
        m_1_0=prior_mean,
        P_1_0_inv=np.linalg.inv(prior_cov),
        motion_fn=motion_model.map_set,
        meas_fn=meas_model.map_set,
        slr_method=SigmaPointSlr(sigma_point_method),
    )
    ms_ls_ipls, Ps_ls_ipls, cost_ls_ipls, tmp_rmse, tmp_nees = run_smoothing(
        SigmaPointLsIpls(motion_model, meas_model, sigma_point_method, num_iter, GridSearch, grid_search_points),
        states,
        measurements,
        prior_mean,
        prior_cov,
        ls_cost_fn,
    )
    results.append((ms_ls_ipls, Ps_ls_ipls, cost_ls_ipls[1:], "LS-IPLS"))

    plot_results(
        states,
        results,
        cartes_meas,
        skip_cov=10,
    )
    plot_metrics(
        [
            (cost_ieks[1:], "IEKS"),
            (cost_lm_ieks[1:], "LM-IEKS"),
            (cost_ipls[1:], "IPLS"),
            # Fixed: was cost_lm_ipls[0:], inconsistent with every other
            # cost history (all skip the initial-trajectory cost at index 0).
            (cost_lm_ipls[1:], "LM-IPLS"),
        ],
        [
            (rmses_ieks, "IEKS"),
            (rmses_lm_ieks, "LM-IEKS"),
            (rmses_ipls, "IPLS"),
            (rmses_lm_ipls, "LM-IPLS"),
        ],
        [
            (neeses_ieks, "IEKS"),
            (neeses_lm_ieks, "LM-IEKS"),
            (neeses_ipls, "IPLS"),
            (neeses_lm_ipls, "LM-IPLS"),
        ],
    )
def main():
    """Monte Carlo version of the tunnel-simulation experiment.

    Repeats the tunnel simulation `args.num_mc_samples` times, each run
    drawing a fresh trajectory/measurement realisation, and accumulates
    per-iteration RMSE and NEES for six iterated smoothers. The per-sample
    matrices (num_mc_samples x num_iter) are saved to disk and summarised
    with error-bar plots.
    """
    log = logging.getLogger(__name__)
    args = parse_args()
    experiment_name = "tunnel_simulation"
    setup_logger(f"logs/{experiment_name}.log", logging.INFO)
    log.info(f"Running experiment: {experiment_name}")
    np.random.seed(2)
    num_iter = args.num_iter

    # Motion model: coordinated turn; eps regularises the otherwise-zero
    # diagonal entries of the process noise.
    sampling_period = 0.1
    v_scale = 7
    omega_scale = 15
    sigma_v = v_scale * 1
    sigma_omega = omega_scale * np.pi / 180
    eps = 0.1
    Q = np.diag([
        eps, eps, sampling_period * sigma_v**2, eps,
        sampling_period * sigma_omega**2
    ])
    motion_model = CoordTurn(sampling_period, Q)

    # Meas model: range/bearing from a fixed sensor; commented values are the
    # baseline noise levels before scaling by noise_factor.
    pos = np.array([100, -100])
    # sigma_r = 2
    # sigma_phi = 0.5 * np.pi / 180
    noise_factor = 4
    sigma_r = 2 * noise_factor
    sigma_phi = noise_factor * 0.5 * np.pi / 180
    R = np.diag([sigma_r**2, sigma_phi**2])
    meas_model = RangeBearing(pos, R)

    # Generate data: tunnel_segment is the time span with degraded
    # measurements ([None, None] would disable the tunnel).
    range_ = (0, None)
    tunnel_segment = [140, 175]
    # tunnel_segment = [None, None]

    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])

    # LM / line-search hyperparameters shared by the regularised variants.
    lambda_ = 1e-2
    nu = 10
    grid_search_points = 10

    # Pre-allocate (num_mc_samples x num_iter) metric matrices; one row is
    # filled per MC realisation.
    num_mc_samples = args.num_mc_samples
    rmses_ieks = np.zeros((num_mc_samples, num_iter))
    rmses_lm_ieks = np.zeros((num_mc_samples, num_iter))
    rmses_ls_ieks = np.zeros((num_mc_samples, num_iter))
    rmses_ipls = np.zeros((num_mc_samples, num_iter))
    rmses_lm_ipls = np.zeros((num_mc_samples, num_iter))
    rmses_ls_ipls = np.zeros((num_mc_samples, num_iter))
    neeses_gn_ieks = np.zeros((num_mc_samples, num_iter))
    neeses_lm_ieks = np.zeros((num_mc_samples, num_iter))
    neeses_ls_ieks = np.zeros((num_mc_samples, num_iter))
    neeses_gn_ipls = np.zeros((num_mc_samples, num_iter))
    neeses_lm_ipls = np.zeros((num_mc_samples, num_iter))
    neeses_ls_ipls = np.zeros((num_mc_samples, num_iter))
    for mc_iter in range(num_mc_samples):
        log.info(f"MC iter: {mc_iter+1}/{num_mc_samples}")
        # Fresh realisation per MC sample; the cost functions below close
        # over these measurements and must be rebuilt each iteration.
        states, measurements = get_states_and_meas(meas_model, R, range_, tunnel_segment)
        cost_fn_eks = partial(
            analytical_smoothing_cost,
            measurements=measurements,
            m_1_0=prior_mean,
            P_1_0=prior_cov,
            motion_model=motion_model,
        meas_model=meas_model,
        )
        sigma_point_method = SphericalCubature()
        cost_fn_ipls = partial(
            slr_smoothing_cost_pre_comp,
            measurements=measurements,
            m_1_0=prior_mean,
            P_1_0_inv=np.linalg.inv(prior_cov),
        )

        ms_gn_ieks, Ps_gn_ieks, cost_gn_ieks, tmp_rmse, tmp_nees = run_smoothing(
            Ieks(motion_model, meas_model, num_iter), states, measurements,
            prior_mean, prior_cov, cost_fn_eks)
        rmses_ieks[mc_iter, :] = tmp_rmse
        neeses_gn_ieks[mc_iter, :] = tmp_nees

        ms_lm_ieks, Ps_lm_ieks, cost_lm_ieks, tmp_rmse, tmp_nees = run_smoothing(
            LmIeks(motion_model, meas_model, num_iter, cost_improv_iter_lim=10, lambda_=lambda_, nu=nu),
            states,
            measurements,
            prior_mean,
            prior_cov,
            cost_fn_eks,
        )
        rmses_lm_ieks[mc_iter, :] = tmp_rmse
        neeses_lm_ieks[mc_iter, :] = tmp_nees

        ms_ls_ieks, Ps_ls_ieks, cost_ls_ieks, tmp_rmse, tmp_nees = run_smoothing(
            LsIeks(motion_model, meas_model, num_iter, GridSearch(cost_fn_eks, grid_search_points)),
            states,
            measurements,
            prior_mean,
            prior_cov,
            cost_fn_eks,
        )
        rmses_ls_ieks[mc_iter, :] = tmp_rmse
        neeses_ls_ieks[mc_iter, :] = tmp_nees

        # NOTE(review): the cost-fn argument here is None while cost_fn_ipls
        # is available — presumably to skip cost evaluation for speed in the
        # MC setting (the single-run variant passes cost_fn_ipls); confirm.
        ms_gn_ipls, Ps_gn_ipls, cost_gn_ipls, tmp_rmse, tmp_nees = run_smoothing(
            SigmaPointIpls(motion_model, meas_model, sigma_point_method, num_iter),
            states,
            measurements,
            prior_mean,
            prior_cov,
            None,
        )
        rmses_ipls[mc_iter, :] = tmp_rmse
        neeses_gn_ipls[mc_iter, :] = tmp_nees

        ms_lm_ipls, Ps_lm_ipls, cost_lm_ipls, tmp_rmse, tmp_nees = run_smoothing(
            SigmaPointLmIpls(motion_model, meas_model, sigma_point_method,
                             num_iter, cost_improv_iter_lim=10,
                             lambda_=lambda_, nu=nu),
            states,
            measurements,
            prior_mean,
            prior_cov,
            cost_fn_ipls,
        )
        rmses_lm_ipls[mc_iter, :] = tmp_rmse
        neeses_lm_ipls[mc_iter, :] = tmp_nees

        # LS-IPLS evaluates candidate mean trajectories in its line search,
        # hence the means-based SLR cost.
        ls_cost_fn = partial(
            slr_smoothing_cost_means,
            measurements=measurements,
            m_1_0=prior_mean,
            P_1_0_inv=np.linalg.inv(prior_cov),
            motion_fn=motion_model.map_set,
            meas_fn=meas_model.map_set,
            slr_method=SigmaPointSlr(sigma_point_method),
        )
        ms_ls_ipls, Ps_ls_ipls, cost_ls_ipls, tmp_rmse, tmp_nees = run_smoothing(
            SigmaPointLsIpls(motion_model,
                             meas_model, sigma_point_method, num_iter,
                             GridSearch, grid_search_points),
            states,
            measurements,
            prior_mean,
            prior_cov,
            ls_cost_fn,
        )
        rmses_ls_ipls[mc_iter, :] = tmp_rmse
        neeses_ls_ipls[mc_iter, :] = tmp_nees

    label_ieks, label_lm_ieks, label_ls_ieks, label_ipls, label_lm_ipls, label_ls_ipls = (
        "IEKS",
        "LM-IEKS",
        "LS-IEKS",
        "IPLS",
        "LM-IPLS",
        "LS-IPLS",
    )
    rmse_stats = [
        (rmses_ieks, label_ieks),
        (rmses_lm_ieks, label_lm_ieks),
        (rmses_ls_ieks, label_ls_ieks),
        (rmses_ipls, label_ipls),
        (rmses_lm_ipls, label_lm_ipls),
        (rmses_ls_ipls, label_ls_ipls),
    ]
    nees_stats = [
        (neeses_gn_ieks, label_ieks),
        (neeses_lm_ieks, label_lm_ieks),
        (neeses_ls_ieks, label_ls_ieks),
        (neeses_gn_ipls, label_ipls),
        (neeses_lm_ipls, label_lm_ipls),
        (neeses_ls_ipls, label_ls_ipls),
    ]
    # Persist raw per-sample metrics, then plot the MC summaries.
    save_stats(Path.cwd() / "results" / experiment_name, "RMSE", rmse_stats)
    save_stats(Path.cwd() / "results" / experiment_name, "NEES", nees_stats)
    plot_scalar_metric_err_bar(rmse_stats, "RMSE")
    plot_scalar_metric_err_bar(nees_stats, "NEES")
import logging

from flask_restful import Resource

from src import log_file, log_level
from src.utils import setup_logger

# Logger dedicated to this resource module; configured once at import time.
TEST_LOGGER = setup_logger(logging.getLogger("TEST"), log_level, log_file)


class Test(Resource):
    """REST resource exposing simple test endpoints."""

    def get(self):
        """Handle GET: log the access and return a static payload.

        NOTE(review): responds with HTTP 401 despite the success-looking
        message — confirm this status code is intentional.
        """
        TEST_LOGGER.debug("Get test API")
        body = {"message": "Test get API"}
        return body, 401

    def post(self):
        """Handle POST: log the access only (implicitly returns None)."""
        TEST_LOGGER.debug("Post test API")
def main():
    """Varying-sensors coordinated-turn Monte Carlo experiment.

    Compares LS-IEKS and LS-IPLS (Armijo line search) on a coordinated-turn
    trajectory observed through a two-sensor bearings model, where at
    selected time steps the measurement is replaced by a near-noise-free
    single-sensor one. Per-iteration RMSE/NEES over the MC runs are saved
    and plotted.

    Cleanup in this revision:
    - removed leftover debug statements (`print(measurements[0]); return`)
      that aborted the experiment on the first MC iteration and made the
      rest of the function unreachable;
    - removed the large commented-out IEKS/LM-IEKS/IPLS/LM-IPLS variants and
      the metric arrays only they used;
    - removed the duplicate `D_x`/`K` definitions.
    """
    args = parse_args()
    log = logging.getLogger(__name__)
    # Fixed: the name previously carried a stray ".py" suffix, which leaked
    # into the log filename and the results directory.
    experiment_name = "ct_varying_sens_metrics"
    setup_logger(f"logs/{experiment_name}.log", logging.INFO)
    log.info(f"Running experiment: {experiment_name}")
    if not args.random:
        np.random.seed(0)

    # Coordinated-turn motion model.
    dt = 0.01
    qc = 0.01
    qw = 10
    prior_mean = np.array([0, 0, 1, 0, 0])
    prior_cov = np.diag([0.1, 0.1, 1, 1, 1])
    D_x = prior_mean.shape[0]  # state dimension
    K = 500  # number of time steps
    Q = np.array([
        [qc * dt**3 / 3, 0, qc * dt**2 / 2, 0, 0],
        [0, qc * dt**3 / 3, 0, qc * dt**2 / 2, 0],
        [qc * dt**2 / 2, 0, qc * dt, 0, 0],
        [0, qc * dt**2 / 2, 0, qc * dt, 0],
        [0, 0, 0, 0, dt * qw],
    ])
    motion_model = CoordTurn(dt, Q)

    # Default measurement model: bearings from two sensors with noticeable
    # noise.
    sens_pos_1 = np.array([-1.5, 0.5])
    sens_pos_2 = np.array([1, 1])
    std = 0.5
    R_uncertain = std**2 * np.eye(2)
    double_sensors = np.row_stack((sens_pos_1, sens_pos_2))
    double_meas_model = MultiSensorBearings(double_sensors, R_uncertain)

    # Alternative model: a single sensor with near-zero noise, used at the
    # selected time steps below.
    single_sensor = sens_pos_2.reshape((1, 2))
    std = 0.001
    R_certain = std**2 * np.eye(1)
    single_meas_model = MultiSensorBearings(single_sensor, R_certain)

    # Create a set of time steps where the original two sensor measurements
    # are replaced with single ones.
    # single_meas_time_steps = set(list(range(0, 100, 5))[1:])
    single_meas_time_steps = set(list(range(0, K, 50))[1:])
    meas_model = BearingsVaryingSensors(double_meas_model, single_meas_model, single_meas_time_steps)

    num_iter = args.num_iter
    num_mc_samples = args.num_mc_samples

    # Per-sample metric matrices (num_mc_samples x num_iter); one row per MC
    # realisation.
    rmses_ls_ieks = np.zeros((num_mc_samples, num_iter))
    rmses_ls_ipls = np.zeros((num_mc_samples, num_iter))
    neeses_ls_ieks = np.zeros((num_mc_samples, num_iter))
    neeses_ls_ipls = np.zeros((num_mc_samples, num_iter))

    # All-zero initial trajectory with the prior covariance at every step.
    init_traj = (np.zeros((K, D_x)), np.array(K * [prior_cov]))
    for mc_iter in range(num_mc_samples):
        log.info(f"MC iter: {mc_iter+1}/{num_mc_samples}")
        if args.random:
            states, measurements = simulate_data(motion_model, double_meas_model, prior_mean[:-1], time_steps=K)
        else:
            # Deterministic data from the LM-IEKS paper reference files.
            states, all_meas, _, xs_ss = get_specific_states_from_file(
                Path.cwd() / "data/lm_ieks_paper", Type.LM, num_iter)
            measurements = all_meas[:, 2:]
        # Change measurements so that some come from the alternative model.
        measurements = modify_meas(measurements, states, meas_model, True)

        cost_fn_eks = partial(
            analytical_smoothing_cost_time_dep,
            measurements=measurements,
            m_1_0=prior_mean,
            P_1_0=prior_cov,
            motion_model=motion_model,
            meas_model=meas_model,
        )
        # Directional derivative of the EKS cost, required by the Armijo
        # line-search condition.
        dir_der_eks = partial(
            dir_der_analytical_smoothing_cost,
            measurements=measurements,
            m_1_0=prior_mean,
            P_1_0=prior_cov,
            motion_model=motion_model,
            meas_model=meas_model,
        )
        ms_ls_ieks, Ps_ls_ieks, cost_ls_ieks, tmp_rmse, tmp_nees = run_smoothing(
            LsIeks(motion_model, meas_model, num_iter, ArmijoLineSearch(cost_fn_eks, dir_der_eks, c_1=0.1)),
            states,
            measurements,
            prior_mean,
            prior_cov,
            cost_fn_eks,
            init_traj,
        )
        rmses_ls_ieks[mc_iter, :] = tmp_rmse
        neeses_ls_ieks[mc_iter, :] = tmp_nees

        sigma_point_method = SphericalCubature()
        # LS-IPLS evaluates candidate mean trajectories in its line search,
        # hence the means-based SLR cost.
        ls_cost_fn = partial(
            slr_smoothing_cost_means,
            measurements=measurements,
            m_1_0=prior_mean,
            P_1_0_inv=np.linalg.inv(prior_cov),
            motion_fn=motion_model.map_set,
            meas_fn=meas_model.map_set,
            slr_method=SigmaPointSlr(sigma_point_method),
        )
        ms_ls_ipls, Ps_ls_ipls, cost_ls_ipls, tmp_rmse, tmp_nees = run_smoothing(
            SigmaPointLsIpls(motion_model, meas_model, sigma_point_method, num_iter,
                             partial(ArmijoLineSearch, c_1=0.1), 10),
            states,
            measurements,
            prior_mean,
            prior_cov,
            ls_cost_fn,
            init_traj,
        )
        rmses_ls_ipls[mc_iter, :] = tmp_rmse
        neeses_ls_ipls[mc_iter, :] = tmp_nees

    label_ls_ieks, label_ls_ipls = "LS-IEKS", "LS-IPLS"
    rmse_stats = [
        (rmses_ls_ieks, label_ls_ieks),
        (rmses_ls_ipls, label_ls_ipls),
    ]
    nees_stats = [
        (neeses_ls_ieks, label_ls_ieks),
        (neeses_ls_ipls, label_ls_ipls),
    ]
    # Persist raw metrics (results/ for archival, tmp_results/ as tikz
    # input), then plot the MC summaries.
    save_stats(Path.cwd() / "results" / experiment_name, "RMSE", rmse_stats)
    save_stats(Path.cwd() / "results" / experiment_name, "NEES", nees_stats)
    tikz_stats(Path.cwd() / "tmp_results", "RMSE", rmse_stats)
    tikz_stats(Path.cwd() / "tmp_results", "NEES", nees_stats)
    plot_scalar_metric_err_bar(rmse_stats, "RMSE")
    plot_scalar_metric_err_bar(nees_stats, "NEES")
import argparse import logging from src import log_file, log_level, address_mongo, port_mongo from flask import Flask from flask_restful import Api, Resource, reqparse from pymongo import MongoClient from src.modules.test import Test from src.modules.user import User from src.utils import setup_logger MAIN_LOGGER = setup_logger(logging.getLogger("MAIN"), log_level, log_file) def parse_argument(): parser = argparse.ArgumentParser(description="Test program", usage="python3 -m src [option]") parser.add_argument("--address", "-a", default="0.0.0.0", help="Host") parser.add_argument("--port", "-p", default=8080, help="Port") args = parser.parse_args() return args def main(args): MAIN_LOGGER.info("Hello world, info") MAIN_LOGGER.debug("Arguments: host: {}, port: {}".format( args.address, args.port)) app = Flask(__name__) api = Api(app) api.add_resource(Test, "/api/test/v1.0/test")