def __init__(self, params, observer):
    """Build the flocking simulation: RNG, 2-D space, and the boid population.

    ``params`` is expected to provide ``seed``, ``area_size`` (width, height),
    ``steps``, and ``pop``; ``observer`` receives simulation callbacks.
    """
    self.rng = np.random.RandomState(params.seed)
    self.area_width = params.area_size[0]
    self.area_height = params.area_size[1]
    self.space = Space2d(self.area_width, self.area_height)

    # Create the population and register each boid at its starting point.
    flock = [self.create_boid(params) for _ in range(params.pop)]
    for member in flock:
        self.space.add(member, member.point)

    super().__init__(flock, dworp.NullEnvironment(), dworp.BasicTime(params.steps),
                     dworp.BasicScheduler(), observer)
def __init__(self, params, observer):
    """Create the Sugarscape agents and drop each one on a random empty cell.

    ``params`` supplies ``seed`` and ``pop``; ``observer`` receives
    simulation callbacks.
    """
    self.rng = np.random.RandomState(params.seed)
    env = SugarscapeEnvironment(self.rng)

    # Construct the population, then place agents one at a time so each
    # placement sees the cells still left empty by the previous ones.
    population = [self.create_agent() for _ in range(params.pop)]
    for agent in population:
        open_cells = env.empty_locations()
        # numpy treats a list of tuples as a 2-D array, and rng.choice only
        # accepts 1-D input — so draw an index rather than a location.
        index = self.rng.choice(range(len(open_cells)))
        env.move(agent, open_cells[index])

    super().__init__(population, env, dworp.InfiniteTime(),
                     dworp.BasicScheduler(), observer, SugarscapeTerminator())
def __init__(self, params, observer):
    """Initialize the birth simulation with equal red and blue cohorts.

    ``params`` supplies ``seed``, ``capacity``, ``red_fertility`` and
    ``blue_fertility``; ``observer`` receives simulation callbacks.
    """
    self.params = params
    self.rng = np.random.RandomState(params.seed)

    # Split the capacity evenly between the two colors (floor division,
    # so an odd capacity leaves one slot unfilled).
    cohort_size = params.capacity // 2
    people = [Person('red', params.red_fertility, self.rng)
              for _ in range(cohort_size)]
    people += [Person('blue', params.blue_fertility, self.rng)
               for _ in range(cohort_size)]

    env = BirthEnvironment({'red': cohort_size, 'blue': cohort_size})
    super().__init__(people, env, dworp.InfiniteTime(), dworp.BasicScheduler(),
                     observer, BirthTerminator())
self.state.fill(0)  # NOTE(review): tail of a method whose header is outside this chunk

# --- methods of the weather environment class (header outside this chunk;
# --- presumably WeatherEnvironment, which the script below instantiates) ---

def step(self, new_time, agents):
    """Draw a new random temperature into the environment state and log it."""
    self.state[self.TEMP] = np.random.randint(self.MIN_TEMP, self.MAX_TEMP)
    self.logger.info("Temperature is now {}".format(self.state[self.TEMP]))

@property
def temp(self):
    """Current temperature value stored in the environment state."""
    return self.state[self.TEMP]


class ShortsObserver(dworp.Observer):
    """Observer that prints how many agents are wearing shorts each step."""

    def step(self, time, agents, env):
        # Count agents whose wearing_shorts flag is truthy this tick.
        count = sum([agent.wearing_shorts for agent in agents])
        print("{}: Temp {} - Shorts {}".format(time, env.temp, count))

    def done(self, agents, env):
        print("Simulation over")


# --- script entry: build a random social graph, wrap each vertex in an
# --- agent, and run the two-stage simulation for 10 steps ---
logging.basicConfig(level=logging.WARN)
g = igraph.Graph.Erdos_Renyi(n=100, p=0.05, directed=False)
agents = [CollegeStudent(v) for v in g.vs]
env = WeatherEnvironment(g)
time = dworp.BasicTime(10)
scheduler = dworp.BasicScheduler()
observer = ShortsObserver()
sim = dworp.TwoStageSimulation(agents, env, time, scheduler, observer)
sim.run()