def generate_model_sample(n_runs, model_params, seed=None):
    """Generate stationsim model runs to test.

    Parameters
    ------
    n_runs : int
        `n_runs` number of stationsim runs to generate. Must be a
        positive integer.

    model_params : dict
        `model_params` dictionary of model parameters required for
        stationsim to run. See stationsim_model.py for more details.

    seed : list
        `seed` seeding for stationsim. Can be a single seed for every run
        or a list of n_runs seeds.
    """
    # avoid a mutable default argument
    if seed is None:
        seed = [None]
    models = []
    if len(seed) == 1:
        seed *= n_runs
    elif len(seed) != n_runs:
        raise ValueError("Not enough seeds specified. Either provide "
                         f"precisely 1 or {n_runs} seeds.")
    for i in range(n_runs):
        # apply the per-run seed so each run is reproducible
        model_params["random_seed"] = seed[i]
        model = Model(**model_params)
        while model.status == 1:
            model.step()
        models.append(model)
    return models
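
# Usage sketch (not from the original source): three reproducible runs with
# distinct seeds. The parameter values are illustrative and assume Model
# supplies sensible defaults for any parameters not listed here.
example_params = {
    "pop_total": 5,
    "width": 200,
    "height": 100,
    "step_limit": 3600,
}
sampled_models = generate_model_sample(3, example_params, seed=[1, 2, 3])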
def generate_Model_Sample(self, n_runs, model_params, single_process=False):
    """Generate stationsim model runs to test.

    Parameters
    ------
    n_runs : int
        `n_runs` number of stationsim runs to generate. Must be a
        positive integer.

    model_params : dict
        `model_params` dictionary of model parameters required for
        stationsim to run. See stationsim_model.py for more details.

    single_process : bool (default False)
        whether to run the models as a single process or using multiple
        processes simultaneously.

    Returns
    ------
    models : list
        a list of completed stationsim `models` given the required width,
        height, pop_total, and gate_speed.
    """
    # placeholder list
    models = []

    if n_runs > 1 and model_params["random_seed"] is not None:
        raise Exception("Error: the 'random_seed' parameter is not None, "
                        "which means that all models generate the same "
                        "results, which I'm sure isn't what you want!")
    elif n_runs < 1:
        raise Exception(
            "Error: need one or more 'n_runs', not {}".format(n_runs))

    # suppress excessive printing
    with HiddenPrints():
        if single_process or n_runs == 1:
            for _ in range(n_runs):
                # generate a model and run it until its status returns
                # to 0 (finished)
                model = Model(**model_params)
                while model.status == 1:
                    model.step()
                models.append(model)
        else:
            numcores = multiprocessing.cpu_count()
            pool = multiprocessing.Pool(numcores)
            try:
                models = pool.map(stationsim_RipleysK.run_model,
                                  [model_params for _ in range(n_runs)])
            finally:
                # make sure the processes are killed whatever happens
                pool.close()

    return models
def assign_agents(cls, particle_num: int, state: np.array, model: Model):
    """
    Assign the state of the particles to the locations of the agents.

    :param particle_num: The number of the particle being assigned
    :param state: The state of the particle to be assigned
    :param model: The model to assign the state to
    :type model: Model

    :return: The model after having the agents assigned according to the
        state.
    """
    model.set_state(state, sensor='location')
    return model
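
# Usage sketch (hypothetical names): push the first particle's state onto its
# model. `particle_states` and `particle_models` stand in for the particle
# filter's internal arrays, and `ParticleFilter` is the assumed enclosing
# class of assign_agents.
updated_model = ParticleFilter.assign_agents(0, particle_states[0],
                                             particle_models[0])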
def make_truth_data(model_params):
    """
    Run StationSim to generate synthetic truth data.
    Returns a list of the states of each agent;
    each list entry is the state of the agents at a timestep.
    """
    # Run the model with the provided params
    model = Model(**model_params)
    model.batch()
    # Extract the agent tracks
    return model.state_history
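
# Usage sketch: generate truth tracks for a small population. The parameter
# values are illustrative and assume Model supplies defaults for the rest;
# per the docstring above, state_history holds one entry per timestep.
truth_params = {"pop_total": 2, "width": 200, "height": 100}
truth_states = make_truth_data(truth_params)
print(f"recorded {len(truth_states)} timesteps")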
def test_StationSim_seeding(self):
    """Test stationsim seeding by running two models with the same seed
    and comparing agent positions.

    The model is seeded via the 'random_seed' entry of model_params,
    which fixes numpy random outputs and must be 0 < int < 2**32 - 1.
    See stationsim_model for model parameter definitions.
    """
    model_params = {
        'pop_total': 5,
        'width': 200,
        'height': 100,
        'gates_in': 3,
        'gates_out': 2,
        'gates_space': 1,
        'gates_speed': 1,
        'speed_min': .2,
        'speed_mean': 1,
        'speed_std': 1,
        'speed_steps': 3,
        'separation': 5,
        'max_wiggle': 1,
        'step_limit': 3600,
        'do_history': True,
        'do_print': True,
        'random_seed': 8**8,
    }

    model1 = Model(**model_params)
    model1 = model_Run(model1)
    array1 = np.hstack(model1.history_state)

    model2 = Model(**model_params)
    model2 = model_Run(model2)
    array2 = np.hstack(model2.history_state)

    self.assertAlmostEqual(np.nansum(array1 - array2), 0)
def aggregate_params(n, bin_size, model_params, ukf_params):
    """update ukf_params with fx/hx and their parameters for experiment 2

    Parameters
    ------
    n : int
        `n` population total
    bin_size : float
        `bin_size` size of the square grid cells agents are aggregated into
    model_params, ukf_params : dict
        dictionaries of model `model_params` and ukf `ukf_params` parameters

    Returns
    ------
    model_params, ukf_params : dict
        updated dictionaries of model `model_params` and ukf `ukf_params`
        parameters ready to use in ukf_ss
    base_model : cls
        initiated stationsim model `base_model` used as the ground truth
    """
    model_params["pop_total"] = n
    base_model = Model(**model_params)

    ukf_params["bin_size"] = bin_size
    ukf_params["poly_list"] = grid_poly(model_params["width"],
                                        model_params["height"],
                                        ukf_params["bin_size"])

    ukf_params["p"] = np.eye(2 * n)  # initial guess at state covariance
    ukf_params["q"] = np.eye(2 * n)  # process noise
    ukf_params["r"] = np.eye(len(ukf_params["poly_list"]))  # sensor noise

    ukf_params["fx"] = fx
    ukf_params["fx_kwargs"] = {"base_model": base_model}
    ukf_params["hx"] = hx2
    ukf_params["hx_kwargs"] = {"poly_list": ukf_params["poly_list"]}
    ukf_params["obs_key_func"] = obs_key_func
    ukf_params["obs_key_kwargs"] = {"pop_total": n}

    ukf_params["file_name"] = ex2_pickle_name(n, bin_size)

    return model_params, ukf_params, base_model
def omission_params(n, prop, model_params, ukf_params):
    """update ukf_params with fx/hx and their parameters for experiment 1

    - assign population size and proportion observed.
    - randomly select agents to observe for index/index2
    - assign initial covariance p as well as sensor and process noise (q, r)
    - assign transition and measurement functions (fx, hx)
    - assign observation key function and numpy file name for saving later.

    Parameters
    ------
    n, prop : float
        `n` population and proportion observed 0 <= `prop` <= 1

    model_params, ukf_params : dict
        dictionaries of model `model_params` and ukf `ukf_params` parameters

    Returns
    ------
    model_params, ukf_params : dict
        updated dictionaries of model `model_params` and ukf `ukf_params`
        parameters ready to use in ukf_ss
    base_model : cls
        initiated stationsim model `base_model` used as the ground truth
    """
    model_params["pop_total"] = n
    model_params["station"] = None
    base_model = Model(**model_params)

    ukf_params["prop"] = prop
    ukf_params["sample_size"] = floor(n * prop)
    ukf_params["index"], ukf_params["index2"] = omission_index(
        n, ukf_params["sample_size"])

    ukf_params["p"] = np.eye(2 * n)  # initial guess at state covariance
    ukf_params["q"] = np.eye(2 * n)  # process noise
    ukf_params["r"] = np.eye(2 * ukf_params["sample_size"])  # sensor noise

    ukf_params["fx"] = fx
    ukf_params["fx_kwargs"] = {"base_model": base_model}
    ukf_params["fx_kwargs_update"] = None
    ukf_params["hx"] = hx1
    ukf_params["hx_kwargs"] = {"index2": ukf_params["index2"],
                               "n": n,
                               "index": ukf_params["index"]}
    ukf_params["obs_key_func"] = obs_key_func

    ukf_params["file_name"] = ex1_pickle_name(n, prop)

    ukf_params["light"] = True
    ukf_params["record"] = True

    return model_params, ukf_params, base_model
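
# Usage sketch: build an experiment 1 configuration for 10 agents with half
# of them observed. `configs.model_params` and `configs.ukf_params` are
# assumed default dictionaries, as in the test set-up elsewhere in this repo.
model_params, ukf_params, base_model = omission_params(
    10, 0.5, configs.model_params, configs.ukf_params)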
def run_model(model_params):
    """ Create a new stationsim model using `model_params` and step it
    until it has finished.

    Parameters
    ------
    model_params : dict
        `model_params` dictionary of model parameters required for
        stationsim to run. See stationsim_model.py for more details.

    Returns
    ------
    model : StationSim object
        the finished model
    """
    model = Model(**model_params)
    while model.status == 1:
        model.step()
    return model
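
# Usage sketch: run the same parameter set over a range of population sizes.
# `base_params` is a hypothetical partial dictionary; the values are
# illustrative and assume Model supplies defaults for the rest.
base_params = {"width": 200, "height": 100}
finished = [run_model(dict(base_params, pop_total=pop))
            for pop in (2, 5, 10)]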
def cone_params(n, cameras, model_params, ukf_params):
    """update ukf_params with fx/hx and their parameters for experiment 4

    - add ground truth stationsim model base_model

    Parameters
    ------
    n : int
        `n` population total
    cameras : list
        list of camera_Sensor objects. Each camera has a polygon it
        observes.
    model_params, ukf_params : `dict`
        default stationsim `model_params` and ukf `ukf_params` parameter
        dictionaries to be updated for the experiment to run.

    Returns
    ------
    model_params, ukf_params : `dict`
        updated default stationsim `model_params` and ukf `ukf_params`
        dictionaries
    base_model : `class`
        initiated stationsim model `base_model` used as the ground truth.
    """
    # stationsim truth model
    model_params["pop_total"] = n
    base_model = Model(**model_params)

    # cameras
    ukf_params["cameras"] = cameras

    # noise structures
    ukf_params["p"] = 0.1 * np.eye(2 * n)  # initial guess at state covariance
    ukf_params["q"] = 0.01 * np.eye(2 * n)  # process noise
    # sensor noise; dynamically updated depending on how many agents are
    # in the cameras
    ukf_params["r"] = 0.01 * np.eye(2 * n)

    # kalman functions
    ukf_params["fx"] = fx
    ukf_params["fx_kwargs"] = {"base_model": base_model}
    ukf_params["hx"] = hx4
    ukf_params["hx_kwargs"] = {
        "cameras": cameras,
        "n": n,
    }
    ukf_params["obs_key_func"] = obs_key_func
    ukf_params["hx_kwargs_update_function"] = hx4_kwargs_updater

    # pickle file name
    ukf_params["file_name"] = ex4_pickle_name(n)

    return model_params, ukf_params, base_model
def ex0_params(n, noise, sample_rate, model_params, ukf_params):
    """update ukf_params with fx/hx and their parameters for experiment 0

    - assign population size, observation noise, and sampling/assimilation
      rate
    - assign initial covariance p as well as sensor and process noise (q, r)
    - assign transition and measurement functions (fx, hx)
    - assign observation key function and numpy file name for saving later.

    Parameters
    ------
    n, noise, sample_rate : float
        `n` population, additive `noise`, and `sample_rate` sampling rate

    model_params, ukf_params : dict
        dictionaries of model `model_params` and ukf `ukf_params` parameters

    Returns
    ------
    model_params, ukf_params : dict
        updated dictionaries of model `model_params` and ukf `ukf_params`
        parameters ready to use in ukf_ss
    base_model : cls
        initiated stationsim model `base_model` used as the ground truth
    """
    model_params["pop_total"] = n
    ukf_params["noise"] = noise
    ukf_params["sample_rate"] = sample_rate
    base_model = Model(**model_params)

    ukf_params["p"] = np.eye(2 * n)  # initial guess at state covariance
    ukf_params["q"] = np.eye(2 * n)  # process noise
    ukf_params["r"] = np.eye(2 * n)  # sensor noise

    ukf_params["fx"] = fx
    ukf_params["fx_kwargs"] = {"base_model": base_model}
    ukf_params["hx"] = hx0
    ukf_params["hx_kwargs"] = {}
    ukf_params["obs_key_func"] = None

    ukf_params["file_name"] = \
        f"config_agents_{n}_rate_{sample_rate}_noise_{noise}"

    return model_params, ukf_params, base_model
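
# Usage sketch: sweep over noise levels and assimilation rates for the
# experiment 0 benchmark. The dictionaries are copied because ex0_params
# mutates them in place; configs.model_params / configs.ukf_params are
# assumed default dictionaries.
for noise in (0, 0.5, 1):
    for rate in (1, 2, 5):
        mp, up, bm = ex0_params(10, noise, rate,
                                dict(configs.model_params),
                                dict(configs.ukf_params))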
def cone_params(n, cameras, model_params, ukf_params):
    """update ukf_params with fx/hx and their parameters for experiment 4

    Parameters
    ------
    n : int
        `n` population total
    cameras : list
        list of camera_Sensor sensor objects. Each camera has a polygon
        it observes.
    model_params, ukf_params : dict
        dictionaries of model `model_params` and ukf `ukf_params` parameters

    Returns
    ------
    model_params, ukf_params : dict
        updated dictionaries of model `model_params` and ukf `ukf_params`
        parameters ready to use in ukf_ss
    base_model : cls
        initiated stationsim model `base_model` used as the ground truth
    """
    model_params["pop_total"] = n
    base_model = Model(**model_params)

    ukf_params["cameras"] = cameras

    ukf_params["p"] = np.eye(2 * n)  # initial guess at state covariance
    ukf_params["q"] = np.eye(2 * n)  # process noise
    # sensor noise; dynamically updated depending on how many agents are
    # in the cameras
    ukf_params["r"] = np.eye(2 * n)

    ukf_params["fx"] = fx
    ukf_params["fx_kwargs"] = {"base_model": base_model}
    ukf_params["hx"] = hx4
    ukf_params["hx_kwargs"] = {"cameras": cameras}
    ukf_params["obs_key_func"] = obs_key_func
    ukf_params["obs_key_kwargs"] = {"pop_total": n}

    ukf_params["file_name"] = ex4_pickle_name(n, cameras)

    return model_params, ukf_params, base_model
def setUpClass(cls):
    """unittest.TestCase's class-level set-up, run once before any tests
    """
    # init params for a SEEDED stationsim
    cls.model_params = {
        'width': 200,
        'height': 50,
        'pop_total': 2,
        'gates_speed': 1,
        'gates_in': 3,
        'gates_out': 2,
        'gates_space': 1,
        'speed_min': .2,
        'speed_mean': 1,
        'speed_std': 1,
        'speed_steps': 3,
        'separation': 5,
        'max_wiggle': 1,
        'step_limit': 3600,
        'do_history': True,
        'do_print': True,
        'random_seed': 8,
    }
    cls.ukf_params = configs.ukf_params
    cls.base_model = Model(**cls.model_params)
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    cls.rjmcmc_ukf = rjmcmc_ukf(cls.model_params, cls.ukf_params,
                                cls.base_model, pool)
def step_particle(cls, particle_num: int, model: Model, num_iter: int,
                  particle_std: float, particle_shape: tuple):
    """
    Step a particle, assign the locations of the agents to the particle
    state with some noise, and then use the new particle state to set the
    location of the agents.

    :param particle_num: The particle number to step
    :param model: A pointer to the model object associated with the
        particle that needs to be stepped
    :param num_iter: The number of iterations to step
    :param particle_std: the particle noise standard deviation
    :param particle_shape: the shape of the particle array
    """
    # Force the model to re-seed its random number generator (otherwise
    # each child process has the same generator):
    # https://stackoverflow.com/questions/14504866/python-multiprocessing-numpy-random
    model.set_random_seed()
    for i in range(num_iter):
        model.step()

    # perturb the agent locations with Gaussian noise (note the scale
    # passed to np.random.normal is particle_std squared)
    noise = np.random.normal(0, particle_std ** 2, size=particle_shape)
    state = model.get_state(sensor='location') + noise
    model.set_state(state, sensor='location')
    return model, state
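
# Sketch of dispatching step_particle across particles with a multiprocessing
# pool, matching the re-seeding note above. `ParticleFilter` (the assumed
# enclosing class), the list `particle_models`, and the shape/noise values
# are hypothetical stand-ins.
from functools import partial
import multiprocessing

step = partial(ParticleFilter.step_particle, num_iter=1, particle_std=0.5,
               particle_shape=(2 * pop_total,))
with multiprocessing.Pool() as pool:
    # each worker returns the (model, state) pair for one particle
    results = pool.starmap(step, enumerate(particle_models))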
    'gates_in': 3,
    'gates_out': 2,
    'gates_space': 1,
    'gates_speed': 1,
    'speed_min': .2,
    'speed_mean': 1,
    'speed_std': 1,
    'speed_steps': 3,
    'separation': 5,
    'max_wiggle': 1,
    'step_limit': 3600,
    'do_history': True,
    'do_print': True,
}

model = Model(**model_params)
while model.status != 0:
    model.step()

marker_attributes = {
    "markers": {-1: "o"},
    "colours": {-1: "black"},
    "labels": {-1: "Pseudo-Truths"},
}
def ex3_params(n, model_params, ukf_params):
    """update ukf_params with fx/hx and their parameters for experiment 3

    - assign population size.
    - build exit gate locations and the get/set gate dictionaries.
    - build the rectangular camera boundary for the corridor.
    - assign initial covariance p as well as sensor and process noise (q, r)
    - assign transition and measurement functions (fx, hx)
    - assign observation key function and numpy file name for saving later.

    Parameters
    ------
    n : int
        `n` population total

    model_params, ukf_params : dict
        dictionaries of model `model_params` and ukf `ukf_params` parameters

    Returns
    ------
    model_params, ukf_params : dict
        updated dictionaries of model `model_params` and ukf `ukf_params`
        parameters ready to use in ukf_ss
    base_model : cls
        initiated stationsim model `base_model` used as the ground truth
    """
    model_params["pop_total"] = n
    model_params["station"] = None
    base_model = Model(**model_params)

    model_params["exit_gates"] = base_model.gates_locations[
        -model_params["gates_out"]:]
    model_params["get_gates_dict"], model_params["set_gates_dict"] = \
        gates_dict(base_model)

    width = model_params["width"]
    height = model_params["height"]
    ukf_params["boundary"] = generate_Camera_Rect(np.array([0, 0]),
                                                  np.array([0, height]),
                                                  np.array([width, height]),
                                                  np.array([width, 0]))

    ukf_params["p"] = np.eye(1 * 2 * n)  # initial guess at state covariance
    ukf_params["q"] = 0.05 * np.eye(1 * 2 * n)  # process noise
    ukf_params["r"] = 0.01 * np.eye(1 * 2 * n)  # sensor noise

    ukf_params["x0"] = base_model.get_state("location")
    ukf_params["fx"] = fx3
    ukf_params["fx_kwargs"] = {
        "state": ukf_params["x0"],
        "boundary": ukf_params["boundary"],
        "get_gates_dict": model_params["get_gates_dict"],
        "set_gates_dict": model_params["set_gates_dict"],
        "set_gates": set_gates,
        "exit_gates": model_params["exit_gates"]
    }
    ukf_params["fx_kwargs_update"] = fx3_kwargs_updater
    ukf_params["hx"] = hx3_1
    ukf_params["hx_kwargs"] = {}
    ukf_params["obs_key_func"] = obs_key_func

    ukf_params["file_name"] = ex3_pickle_name(n)

    return model_params, ukf_params, base_model
""" a - alpha between 1 and 1e-4 typically determines spread of sigma points. however for large dimensions may need to be even higher b - beta set to 2 for gaussian. determines trust in prior distribution. k - kappa usually 0 for state estimation and 3-dim(state) for parameters. not 100% sure what kappa does. think its a bias parameter. !! might be worth making an interactive notebook that varies these. for fun """ ukf_params = { "a": 1, "b": 2, "k": 0, } base_model = Model(**model_params) u = ukf_ss(model_params, filter_params, ukf_params, base_model) u.init_ukf(ukf_params) #%% def state_test(u): """Make sure the ukf state is still in tact. """ "check an array of correct size" assert type(u.ukf.x) == np.ndarray assert np.shape(u.ukf.x) == (model_params["pop_total"] * 2, ) "check all agents within boundaries"
""" def __init__(self): pass def test_Sigmas(self): pass def test_Unscented_Mean(self): pass def test_Covariance(self): pass class Test_ex1(object): """tests for 1st ukf experiment module """ def __init__(self, model): pass if __name__ == "__main__": start_model = Model(**model_params) macros = Test_macros(start_model, 8**8, default_model_params, default_ukf_params)