def set_body(self, size, body):
    self.logger.info("In: {}, {}".format(size, body))

    # noinspection PyUnusedLocal
    pi = np.pi  # Referenced by the eval expressions loaded from agent.csv

    # Load tabular values
    bodies = load_config("body.csv")
    try:
        body = bodies[body]
    except KeyError:
        raise KeyError("Body \"{}\" is not in bodies {}.".format(
            body, bodies))
    values = load_config("agent.csv")["value"]

    # Arguments for Agent
    # TODO: Scaling inertia_rot
    mass = self.truncnorm(body["mass"], body["mass_scale"], size)
    radius = np.random.uniform(0.25, 0.35, size)
    ratio_rt = body["ratio_rt"]
    ratio_rs = body["ratio_rs"]
    ratio_ts = body["ratio_ts"]
    target_velocity = self.truncnorm(body["velocity"],
                                     body["velocity_scale"], size)
    inertia_rot = eval(values["inertia_rot"]) * np.ones(size)
    target_angular_velocity = eval(
        values["target_angular_velocity"]) * np.ones(size)

    # Agent class
    self.agent = Agent(self.walls.size, size, mass, radius, ratio_rt,
                       ratio_rs, ratio_ts, inertia_rot, target_velocity,
                       target_angular_velocity)
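# A minimal, hypothetical sketch of the truncated-normal sampler used above as
# self.truncnorm(loc, abs_scale, size). The project's actual helper lives
# elsewhere; this standalone version assumes samples are clipped to +/- 3
# standard deviations around the mean, which is one common convention.
import numpy as np
from scipy.stats import truncnorm


def truncated_normal(loc, abs_scale, size, std_trunc=3.0):
    """Draw `size` samples from N(loc, abs_scale) truncated to
    loc +/- std_trunc * abs_scale."""
    a, b = -std_trunc, std_trunc  # bounds in units of standard deviations
    return truncnorm.rvs(a, b, loc=loc, scale=abs_scale, size=size)


# Example with illustrative numbers: 100 body masses, mean 73.5 kg, spread 8.0 kg
masses = truncated_normal(73.5, 8.0, size=100)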
def set_body(self, size, body):
    self.logger.info("In: {}, {}".format(size, body))

    # noinspection PyUnusedLocal
    pi = np.pi  # Referenced by the eval expressions loaded from agent.csv

    # Load tabular values
    bodies = load_config("body.csv")
    try:
        body = bodies[body]
    except KeyError:
        raise KeyError("Body \"{}\" is not in bodies {}.".format(
            body, bodies))
    values = load_config("agent.csv")["value"]

    # Arguments for Agent
    # TODO: Scaling inertia_rot
    mass = self.truncnorm(body["mass"], body["mass_scale"], size)
    radius = np.random.uniform(0.25, 0.35, size)
    ratio_rt = body["ratio_rt"]
    ratio_rs = body["ratio_rs"]
    ratio_ts = body["ratio_ts"]
    inertia_rot = eval(values["inertia_rot"]) * np.ones(size)
    target_angular_velocity = eval(
        values["target_angular_velocity"]) * np.ones(size)

    # Agents don't actually play a game in these simulations. Agents are given
    # fixed strategies so that we can use the same crowd movement simulator as
    # in the folder "model_with_game".
    imp_percentage = 0.5
    n_imps = int(size * imp_percentage)
    imps = np.zeros(n_imps)        # impatient agents (strategy 0)
    pats = np.ones(size - n_imps)  # patient agents (strategy 1)
    imps_pats = np.concatenate((imps, pats))
    strategy = np.random.permutation(imps_pats)
    imp_strategists = np.where(strategy == 0)

    # IMPORTANT: Change here the agents' speed and social force parameters in
    # simulations with fixed strategies.
    target_velocity = 1 * np.ones(size)
    target_velocity[imp_strategists] = 5
    tau_adj = 0.5 * np.ones(size)
    A_agent = 2000 * np.ones(size)  # Helbing (2000) uses A_ij = 2000
    A_agent[imp_strategists] = 1000
    B_agent = 0.08 * np.ones(size)  # Helbing (2000) uses B_ij = 0.08
    A_wall = 2000 * np.ones(size)   # Helbing (2000) uses A_iw = 2000
    B_wall = 0.08 * np.ones(size)   # Helbing (2000) uses B_iw = 0.08

    # Agent class
    self.agent = Agent(strategy, tau_adj, A_agent, B_agent, A_wall, B_wall,
                       self.walls.size, size, mass, radius, ratio_rt,
                       ratio_rs, ratio_ts, inertia_rot, target_velocity,
                       target_angular_velocity)
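# Hypothetical call site for set_body. "adult" is assumed to be one of the row
# keys in body.csv; the real keys depend on the project's configuration files.
simulation.set_body(size=200, body="adult")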
def configure_hdfstore(self, ext):
    if self.hdfstore is None:
        self.logger.info("")

        # Configure hdfstore file
        self.hdfstore = HDFStore(ext)

        # Add dataset
        parameters = load_config('parameters.yaml')

        args = self.agent, parameters['agent']
        self.hdfstore.add_dataset(*args)
        self.hdfstore.add_buffers(*args)

        args = self, parameters['simulation']
        self.hdfstore.add_dataset(*args)
        self.hdfstore.add_buffers(*args)

        args = self.game, parameters['game']
        self.hdfstore.add_dataset(*args)
        self.hdfstore.add_buffers(*args)

        self.logger.info("")
    else:
        self.logger.info("Already configured.")
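# Hypothetical usage: configure HDF5 output once before starting a run. The
# ".hdf5" extension string is an assumption about what HDFStore expects here.
simulation.configure_hdfstore(".hdf5")
simulation.configure_hdfstore(".hdf5")  # second call only logs "Already configured."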
def __init__(self):
    super(MainWindow, self).__init__()

    # Logger
    self.logger = logging.getLogger("crowddynamics.gui.mainwindow")

    # Load ui files
    self.setupUi(self)

    # Load data from configs
    self.configs = load_config("simulations.yaml")

    # Simulation with multiprocessing
    self.queue = Queue(maxsize=4)
    self.process = None

    # Graphics
    self.timer = QtCore.QTimer(self)
    self.plot = None

    # Buttons
    # RadioButton for enabling HDF5 saving for the simulation
    self.savingButton = QtGui.QRadioButton("Save to HDF5Store")
    # Button that initializes the selected simulation
    self.initButton = QtGui.QPushButton("Initialize Simulation")

    # Configuration. Should be done last.
    self.configure_plot()
    self.configure_signals()
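# Hypothetical entry point for the GUI. The PyQt4-style imports mirror the
# QtGui/QtCore usage above; the project's actual launcher script may differ.
import sys

from PyQt4 import QtGui


def main():
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()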
def configure(self, process: MultiAgentSimulation):
    """Configure static plot items and the initial state of the dynamic
    plot items (agents).

    :param process: Simulation process
    :return:
    """
    self.logger.info("")

    # Clear previous plots and items
    self.clearPlots()
    self.clear()

    # Setup plots
    settings = load_config("graphics.yaml")

    if process.domain is not None:
        self.logger.debug("domain")
        domain = process.domain
        if isinstance(domain, Polygon):
            x, y = domain.exterior.xy
            x, y = np.asarray(x), np.asarray(y)
            self.setRange(xRange=(x.min(), x.max()),
                          yRange=(y.min(), y.max()))
            item = pg.PlotDataItem(x, y)  # TODO: settings["domain"]["brush"]
            self.addItem(item)

    if process.exits is not None:
        self.logger.debug("exits")
        exits = process.exits
        for exit_ in exits:
            if isinstance(exit_, LineString):
                x, y = exit_.xy
                x, y = np.asarray(x), np.asarray(y)
                item = pg.PlotDataItem(x, y)  # TODO: pen
                self.addItem(item)

    if process.agent is not None:
        self.logger.debug("agent")
        agent = process.agent
        if agent.three_circle:
            model = ThreeCircle(agent.r_t, agent.r_s)
            model.set_data(agent.position, agent.position_ls,
                           agent.position_rs, active=agent.active)
            for item in model.items:
                self.addItem(item)
        else:
            model = Circular(agent.radius)
            model.set_data(agent.position, active=agent.active)
            self.addItem(model)
        self.agent = model

    if process.obstacles is not None:
        self.logger.debug("Obstacles")
        obstacles = process.obstacles
        for obstacle in obstacles:
            if isinstance(obstacle, LineString):
                x, y = obstacle.xy
                x, y = np.asarray(x), np.asarray(y)
                item = pg.PlotDataItem(x, y, pen='k')
                self.addItem(item)
def __init__(self, radius):
    super(Circular, self).__init__()
    self.settings = load_config("graphics.yaml")["agent"]
    self.radius = radius

    symbol_size = 2 * radius
    symbol_pen = np.zeros_like(radius, dtype=object)
    symbol_brush = np.zeros_like(radius, dtype=object)

    kwargs = dict(
        pxMode=False,
        pen=None,
        symbol='o',
        symbolSize=symbol_size,
        symbolPen=symbol_pen,
        symbolBrush=symbol_brush,
    )

    # Fill the per-agent pen and brush arrays with the "active" style from
    # graphics.yaml (keys are expected to name array-valued entries such as
    # "symbolPen" and "symbolBrush").
    for key, val in self.settings["active"].items():
        kwargs[key][:] = val

    self.setData(**kwargs)
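# A minimal sketch of the matching per-frame update, based on how Circular is
# used in MultiAgentPlot.configure (model.set_data(agent.position,
# active=agent.active)). This is an assumption about the class, not the
# project's actual implementation: it pushes new centre coordinates into the
# underlying PlotDataItem while the per-agent symbol styles set in __init__
# persist.
def set_data(self, position, active=None):
    self.setData(position[:, 0], position[:, 1])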
import importlib
import logging
import sys
from functools import partial
from multiprocessing import Queue

from crowddynamics.functions import load_config
from crowddynamics.gui.graphics import MultiAgentPlot

configs = load_config("simulations.yaml")

# Simulation with multiprocessing.
queue = Queue(maxsize=4)
process = None

d = configs["simulations"]["room_evacuation_game"]
module = importlib.import_module(d["module"])
# The simulation is an instance of the RoomEvacuationGame class.
simulation = getattr(module, d["class"])
process = simulation(queue, **d["kwargs"])

args = [(("agent", "agent"),
         ["position", "active", "position_ls", "position_rs"])]
# Since the game is not played, the strategies are not saved.
if process.game is not None:
    args.append((("game", "agent"), ["strategy"]))
process.configure_queuing(args)

# The simulation results are saved to the author's folders on the university
# server. If you are running the code, change the saving location to something
# suitable.
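# Hypothetical continuation: start the simulation process and drain the result
# queue. The start() call and the None end-of-run sentinel are assumptions about
# MultiAgentSimulation, not taken from the project.
process.start()
while True:
    data = queue.get()
    if data is None:  # assumed end-of-run sentinel
        break
    # `data` carries the queued agent attributes listed in `args` above,
    # e.g. data["position"], data["active"].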