def optimizer_worker(train):
    """Continuously sample the replay buffer and train the network.

    Blocks until the replay buffer is ready, then loops forever: runs 5
    training steps per cycle and saves the latest model afterwards.

    Args:
        train: training context exposing replay_buffer, batch_size,
            obs_shape, act_shape, network and learning_rate.
    """
    print("Optimizer worker started")
    # Idiomatic truthiness test (was `ready() == False`).
    while not train.replay_buffer.ready():
        sleep(10)
    while True:
        for _ in range(5):
            sampled = train.replay_buffer.get_samples(train.batch_size)
            state_batch, action_batch, value_batch = zip(*sampled)
            # States are stored as state.txt file paths to keep the replay
            # json small; reload each into an observation via World.
            states = []
            for state_path in state_batch:
                world = World(state_path)
                states.append(world.current_state())
            # Convert raw MCTS visit counts to a probability distribution.
            actions = []
            for visit_count in action_batch:
                visit_count_array = np.array(visit_count)
                actions.append(visit_count_array / np.sum(visit_count_array))
            state_batch_reshaped = np.reshape(
                states,
                (-1, train.obs_shape[1], train.obs_shape[0], train.obs_shape[2]))
            action_batch_reshaped = np.reshape(actions, (-1, train.act_shape))
            value_batch_reshaped = np.reshape(value_batch, (-1, 1))
            loss, entropy = train.network.train_step(
                state_batch_reshaped, action_batch_reshaped,
                value_batch_reshaped, train.learning_rate)
            print(loss, entropy)
        # Checkpoint after every 5 training steps.
        train.network.save_model(os.path.join(os.getcwd(), "latest"))
class Application:
    """Top-level pygame application: window setup and the 60 FPS loop."""

    def __init__(self):
        pygame.init()
        pygame.display.set_caption('SideScroller')
        pygame.display.set_mode([1024, 600])
        # Build the world and attach its behavior managers.
        self.world = World()
        for tag, manager in (('player', MovementManager()),
                             ('scrollable', ScrollManager()),
                             ('friction', FrictionManager())):
            self.world.addManager(tag, manager)
        self.world.load('lvl/maps.txt', 'lvl/1.txt')
        self.clock = pygame.time.Clock()
        self.go = True

    def loop(self):
        """Run the event/update/draw cycle until the window is closed."""
        while self.go:
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.QUIT:
                    sys.exit()
            self.clock.tick(60)
            self.world.update(events)
            self.world.draw(pygame.display.get_surface())
            pygame.display.flip()
def fill_up_results(config):
    """Run RSA experiments over a parameter grid, appending timings to CSV.

    Only runs with index >= start_c actually execute, so a partially
    filled Data/results.csv can be resumed without redoing earlier runs.

    Removed dead assignments from the original (tresholds, tresholds2,
    the shadowed first cell_nums list, and the unused added_fig_nums) and
    wrapped the results file in a context manager so it is always closed.
    """
    counter = 0
    start_c = 45  # resume point: skip runs already recorded
    cell_nums = [25, 50, 75, 100, 125, 150, 175, 200, 225, 250]
    with open("Data/results.csv", "a+") as res_f:
        for fig_added_treshold in [0.98]:
            for added_fig_num in [512 * 32]:
                for cell_num in cell_nums:
                    w = World(config, 1.0, (cell_num, cell_num),
                              (cell_num / 25) * 2 * 512, fig_added_treshold,
                              (10**5) * 5)
                    for i in range(5):
                        if counter >= start_c:
                            print("START", counter, "/", 5 * 10)
                            results = w.perform_rsa(save_summary=False)
                            res_f.write(
                                str(fig_added_treshold) + "," +
                                str(added_fig_num) + "," + str(cell_num) +
                                "," +
                                str(results["summary"]["total_time"]) + "\n")
                            print("END:", results["summary"]["total_time"])
                        # Count every run (skipped or not) so resuming works.
                        counter += 1
def _set_ref(self, new_ref_name):
    '''Changes the reference frame of the action step.

    Converts the stored arm pose(s) into the frame named new_ref_name and
    records the new frame (plus its reference object) on the step.
    '''
    new_ref = World.get_ref_from_name(new_ref_name)
    if (new_ref != ArmState.ROBOT_BASE):
        index = ActionStepMarker._ref_names.index(new_ref_name)
        # NOTE(review): index - 1 implies _ref_object_list is offset by one
        # relative to _ref_names (robot base presumably has no entry) —
        # confirm the two lists stay aligned.
        new_ref_obj = ActionStepMarker._ref_object_list[index - 1]
    else:
        # Robot-base frame has no associated object; use an empty one.
        new_ref_obj = Object()
    if (self.action_step.type == ActionStep.ARM_TARGET):
        # Single-pose step: convert only the arm this marker controls
        # (arm_index 0 is the right arm, otherwise the left).
        if self.arm_index == 0:
            self.action_step.armTarget.rArm = World.convert_ref_frame(
                self.action_step.armTarget.rArm, new_ref, new_ref_obj)
        else:
            self.action_step.armTarget.lArm = World.convert_ref_frame(
                self.action_step.armTarget.lArm, new_ref, new_ref_obj)
    elif (self.action_step.type == ActionStep.ARM_TRAJECTORY):
        # Trajectory step: convert every waypoint of the controlled arm.
        for i in range(len(self.action_step.armTrajectory.timing)):
            if self.arm_index == 0:
                arm_old = self.action_step.armTrajectory.rArm[i]
                arm_new = World.convert_ref_frame(arm_old, new_ref,
                                                  new_ref_obj)
                self.action_step.armTrajectory.rArm[i] = arm_new
            else:
                arm_old = self.action_step.armTrajectory.lArm[i]
                arm_new = World.convert_ref_frame(arm_old, new_ref,
                                                  new_ref_obj)
                self.action_step.armTrajectory.lArm[i] = arm_new
        # Record the new frame on the trajectory itself.
        if self.arm_index == 0:
            self.action_step.armTrajectory.rRefFrameObject = new_ref_obj
            self.action_step.armTrajectory.rRefFrame = new_ref
        else:
            self.action_step.armTrajectory.lRefFrameObject = new_ref_obj
            self.action_step.armTrajectory.lRefFrame = new_ref
def setUpClass(cls):
    """Create two worlds and three actors; place actor_two in world_two."""
    cls.world_one = World(7, 9)
    cls.world_two = World(10, 15)
    cls.actor_one, cls.actor_two, cls.actor_three = Actor(), Actor(), Actor()
    cls.world_two.addObject(cls.actor_two, 5, 10)
def __loop(self, key: str):
    '''Sweep a single configuration parameter and record gini results.

    @para:
        - key: [str] the parameter's name
    '''
    # Walk downwards when the start value exceeds the end value.
    delta = self.step if self.conf_start[key] <= self.conf_end[key] else -1 * self.step
    print(f'\033[33m {key} ------- \033[0m')
    for candidate in range(self.conf_start[key], self.conf_end[key], delta):
        print(f'{key}={candidate}')
        run_ginis = []
        # Each setting is repeated self.loop_num times to smooth out noise.
        for _ in range(self.loop_num):
            self.conf[key] = candidate
            world = World(self.conf)
            _, gini_results, _, _, _ = world.simulate()
            # A single run's score: mean gini of the latest 200 ticks.
            avg_gini = np.average(gini_results[-200:])
            run_ginis.append(avg_gini)
            print(avg_gini)
        record = list(self.conf.values())
        # Store the configuration alongside its averaged gini index.
        record.append(np.average(run_ginis))
        self.results.append(record)
def find_hyper_param(algorithm='sarsa'):
    """Grid-search alpha and epsilon-decay for SARSA / Q-learning on World.

    Trains one run per (decay rate, alpha) pair, plots each learning
    curve, and prints the pair with the lowest MSE.
    """
    epsilon = 1
    gamma = 0.9
    episodes = 100000
    MSE = []
    params = []
    alphas = [0.2, 0.02, 0.002, 0.0002, 0.00002]
    rates = [0.1, 0.01, 0.001, 0.0001, 0.00001]
    env = World()
    # Iterate the grids directly instead of by index.
    for epsilon_decay in rates:
        for alpha in alphas:
            if algorithm == 'sarsa':
                Q, scores = env.sarsa(epsilon, gamma, alpha, episodes,
                                      decay_rate=epsilon_decay)
            else:
                Q, scores = env.q_learning(epsilon, gamma, alpha, episodes,
                                           decay_rate=epsilon_decay)
            MSE.append(calculate_mse(np.array(np.max(Q, axis=1))))
            params.append([alpha, epsilon_decay])
            plot_learning(episodes, scores, epsilon_decay, alpha)
    best_run = np.argmin(MSE)
    print(MSE)
    print('The best run is: %s, MSE: %s, alpha: %s, decay rate: %s' %
          (best_run, MSE[best_run], params[best_run][0], params[best_run][1]))
def play(self, should_stow):
    """Simulate one boarding round and return the number of steps taken.

    Args:
        should_stow: when True each passenger gets a normally distributed
            stowing time; otherwise stowing is instantaneous.

    Returns:
        int: steps elapsed until every passenger reached its destination.
    """
    stepsCounter = StepCounter()
    steps = 0
    # Per-passenger stowing delays (zero when stowing is disabled).
    if should_stow:
        stow_times = list(np.random.normal(loc=self.stowing_mean,
                                           size=self.number_of_passengers))
    else:
        stow_times = [0] * self.number_of_passengers
    # Creating passengers.
    for i in range(self.number_of_passengers):
        World.get_instance().add_passenger(
            Passenger(random.choice(self.randomColors), self.destinies[i], i,
                      stow_times[i]))
    while True:
        steps += 1
        stepsCounter.updateSteps(steps)
        for passenger in World.get_instance().passengers:
            passenger.move()
        # End of simulation: every passenger is at its destination.
        # (Replaces the manual flag loop and the `end == True` comparison.)
        end = all(p.position == p.destiny
                  for p in World.get_instance().passengers)
        if end:
            World.get_instance().reset()
            stepsCounter.clear()
            World.get_instance().update()
            break
        if self.visualise:
            World.get_instance().update()
            time.sleep(0.2)
    return steps
def __init__(self):
    """Initialize pygame, open the game window, and build the world."""
    pg.init()
    window_size = (SCREENWIDTH * TILESIZE, SCREENHEIGHT * TILESIZE)
    self.screen = pg.display.set_mode(window_size)
    pg.display.set_caption("Car Network")
    self.running = False
    self.world = World(self.screen)
def __init__(self,
             # World
             shape, x, y, theta, map_size,
             # ParticleFilter
             num_particles,
             # Experiment
             lookahead_depth, discount):
    """Build the world and particle filter; reset experiment state."""
    self.world = World(shape, x, y, theta, map_size)
    self.particle_filter = ParticleFilter(shape, num_particles, map_size)
    self.lookahead_depth = lookahead_depth
    self.discount = discount
    # Convergence criteria on the particle cloud's mean and spread.
    self.mean_xy_threshold = 1
    self.mean_theta_threshold = np.pi / 8
    self.std_xy_threshold = 2
    self.std_theta_threshold = np.pi / 8
    # Per-run accumulators.
    self.entropies, self.costs = [], []
    self.previous_action = None
    self.converged = None
def __init__(self, config):
    """Build tasks, agents and the world from a configuration dict."""
    self.config = config
    # Tasks come straight from the config entries.
    self.tasks = [Task(t["id"], t["time"], t["target"])
                  for t in config["tasks"]]
    # Agents with their preferences and movement parameters.
    self.agents = [Agent(a["id"], a["pos"], a["prefs"], maxCap=a["maxCap"],
                         restTime=a["restTime"], speed=a["speed"])
                   for a in config["agents"]]
    self.world = World(rows=config["world"]["rows"],
                       cols=config["world"]["cols"],
                       agents=self.agents,
                       tasks=self.tasks)
    # Give every agent a back-reference to the shared world.
    for agent in self.agents:
        agent.world = self.world
    self.initiators = []
    # Results table: one offer column per agent.
    headers = ["Step", "Task"]
    for agent in self.agents:
        headers.append("A nr. " + str(agent.id) + " offers:")
    self.pt = PrettyTable(headers)
def main():
    """Prompt for MDP rewards/parameters, build the world, train the agent.

    Fixes the epsilon guard: `input()` returns a string, so the original
    `epsilon == 0` (str vs int) could never be true and a zero epsilon
    slipped through. Inputs are now converted once, up front.
    """
    # User input (converted to numbers immediately).
    goalReward = float(input('Enter the reward for the goal state: '))
    pitReward = float(input('Enter the reward for a pit state: '))
    actionReward = float(input('Enter the reward for taking an action: '))
    giveUpReward = float(input('Enter the reward for giving up: '))
    trialNum = int(input('Enter the number of trials to train the agent for: '))
    epsilon = float(input('Enter the epsilon parameter: '))
    # A zero epsilon would disable exploration entirely; use a small floor.
    if epsilon == 0:
        epsilon = 0.01
    # World setup.
    myWorld = World(goalReward, pitReward, giveUpReward, actionReward)
    myWorld.setUpWorld()
    # Agent setup and training.
    agent = Agent()
    agent.train(trialNum, epsilon, myWorld)
    print("End")
def __init__(self):
    """Constructor: start with no type/action, no characters, a fresh World."""
    self.type = None
    self.action = None
    self.characters = []
    self.world = World()
def main():
    """Construct a World with the default simulation parameters and run it."""
    n = 3
    F = 5
    Di = n * 3
    WIDTH, HEIGHT = 100, 100
    MAX_SPEED, MIN_SPEED = 0.8, 0.2
    LOW_VALUE = 0.01
    DEATH_LIMIT = 5
    show_annotations = False
    sleepInterval = 25
    # Every individual starts with the same energy budget.
    initEnergies = [100] * n
    simulation = World(n, F, Di, WIDTH, HEIGHT, MAX_SPEED, MIN_SPEED,
                       LOW_VALUE, DEATH_LIMIT, show_annotations,
                       sleepInterval, initEnergies)
    simulation.playWorld()
def __init__(self):
    """Set up the game: world, characters, supplies, inventory, and flags."""
    self.__world = World()
    self.__menu = 'main'
    # Characters
    self.__ying = Ying(5, 6, 5)
    self.__taylor = Taylor(6, 6, 5)
    self.__mizuki = Mizuki(7, 7, 4)
    self.__cecilia = Cecilia(5, 5, 6)
    self.__skeleton = Skeleton(0, 0, 0)
    self.__character = None
    self.__boolean = None
    # Starting food and drink
    self.__foods, self.__drinks = 4, 5
    # Inventory counters
    self.__musket = 0
    self.__firstAid = 0
    self.__computer = 0
    self.__gas = 0
    # Values for running the game loop
    self.__time = 0
    self.__exist = False
    self.__isNumber2 = None
    self.__special = None
    self.__electricity = True
def load_world(world_file): from World import World # Reset entity handler handler.reset() # Get world type with open(world_file.full_file, "rb") as file: world_type = int.from_bytes(file.read(2)[1:2], byteorder) # Load world global world del world if world_type == World.WORLD: world = World.World(world_file) elif world_type == World.IDLE: world = World.IdleWorld(world_file) # Load the world if not LoadWorld(world).run_now(): pg.quit() exit(0) # Set up player map player.set_map_source(world.map) # Spawn the player player.spawn()
def load_game(self, file_name):
    """Load a saved game file: world, networked teams, mechs, stations.

    Line-oriented format:
      - 'M:<map name>' first line, names the map the world is built from
      - 't-<team>@<host>:<port>' starts a new networked team
      - 'm<id...>@<loc>' creates a mech for the current team
      - 's...@<loc>' creates a station for the current team
    """
    self.players = []
    current_team_name = None
    current_team_obj = None
    with open(file_name, "r") as f:
        first_line = f.readline()
        if first_line[0] == "M":
            self.map_name = first_line.split(":")[1].rstrip()
            self.world = World(self.map_name)
            print("creating map from " + self.map_name)
        self.set_color_prefs(Game.default_prefs)
        for line in f:
            # print(line[0])
            if line[0] == "t":
                # Team line: name before '@', host:port after it.
                current_team_name = line.split("-")[1].split("@")[0]
                host, port = line.split("-")[1].split("@")[1].split(":")
                port = int(port)
                print("creating new team: " + current_team_name)
                current_team_obj = Team(self.world, current_team_name)
                self.add_player(Server_Facing_Networked_Player(self, current_team_obj, host, port))
            elif line[0] == "m":
                # Mech line: chars 2-5 are the mech id; location after '@'
                # (trailing newline stripped with [:-1]).
                loc = location(line.split("@")[1][:-1])
                current_team_obj.create_mech(line[2:6], loc)
            elif line[0] == "s":
                # Station line: location after '@'.
                loc = location(line.split("@")[1][:-1])
                self.world.create_station_at(loc, current_team_obj)
def init():
    """
    Game initialization function.

    1. Creates a L{Debugger} (debugger) and L{EventTimer} (eventTimer) and
       stores references to them in an instance of L{EventManager}.
    2. Several listeners are started and registered with eventManager:
        - Instance of L{Universe} (universe)
        - Instance of L{UserInterface} (ui)
        - Instance of L{Window} (gameWindow)
    3. We send the eventManager a message to start the game
        - This message is interpreted by the gameWindow
    """
    debugger = Debugger()
    eventTimer = EventTimer()
    # Create the event manager for low-level events.
    eventManager = Manager(eventTimer, debugger)
    # Connecting to the game server is best-effort: the game still runs
    # locally without it. Catch Exception instead of the original bare
    # `except:` so KeyboardInterrupt/SystemExit still propagate.
    try:
        client = GameClient(eventManager, host='10.41.24.79', port=1567)
    except Exception:
        pass
    # Create and register the standard listeners with the event manager.
    # World w is set to the activeWorld of the universe.
    universe = Universe(eventManager)
    ui = UserInterface(eventManager, universe.activeWorld)
    gameWindow = Window(eventManager, width=1024, height=768)
    w = World()
    universe.changeWorld(w)
    # Populate World w with test entities.
    for i in range(100):
        w.addEntity(TestEntity('testCraft.png', i * 50, i * 50, w, 'alpha'))
    eventManager.post(Event.WorldManipulationEvent())
    # Notify the manager that the window should start to accept input.
    eventManager.post(Event.StartEvent())
    return eventManager.eventTypesToListeners
def step_impl(context, element_name, value):
    """Verify that the element's value contains the expanded text."""
    expected = world.replace_variables(value)
    element = world.find_element(element_name)
    if expected not in element.value:
        log.failed("Verify value contains?", element.value, expected)
    else:
        return True
def build(self):
    """Configure the window size and return the running World widget."""
    Config.set('graphics', 'width', self.settings.WINDOW_WIDTH)
    Config.set('graphics', 'height', self.settings.WINDOW_HEIGHT)
    root = World(self.settings)
    root.draw()
    # Drive the world's update loop at the configured tick rate.
    Clock.schedule_interval(root.update, self.settings.UPDATE_SPEED)
    return root
def _offset_pose(pose, constant=1):
    '''Offsets the world pose for visualization.'''
    base_transform = World.get_matrix_from_pose(pose)
    # Shift along x by the marker offset (sign/scale set by `constant`).
    shift = tf.transformations.translation_matrix(
        [constant * ActionStepMarker._offset, 0, 0])
    combined = tf.transformations.concatenate_matrices(base_transform, shift)
    return World.get_pose_from_transform(combined)
def d_callback(self):
    """Switch to mode 'D' and start the 20 Hz update loop."""
    self.mode = "D"
    Clock.schedule_interval(self.update, 1.0 / 20.0)
    self.world = World("R")
    self.kc = KeyboardControl(self.world)
    self.disable_buttons()
    # Only the color-range controls stay usable in this mode.
    for control in (self.lower_color, self.upper_color):
        control.disabled = False
def step_impl(context, element_name, value):
    """Verify that the element's tooltip (title attribute) equals value.

    The original failure message read `element.title`, which is not what
    was compared (and may not even exist on the element object); both the
    check and the report now use the same attribute value.
    """
    value = world.replace_variables(value)
    element = world.find_element(element_name)
    title = element.get_attribute('title')
    if title == value:
        return True
    else:
        log.failed("Verify tooltip text is?", title, value)
def _offset_pose(pose, constant=1):
    '''Offsets the world pose for visualization.'''
    # Translate the pose matrix along x by the marker offset, then convert back.
    offset_vec = [constant * ActionStepMarker._offset, 0, 0]
    shifted = tf.transformations.concatenate_matrices(
        World.get_matrix_from_pose(pose),
        tf.transformations.translation_matrix(offset_vec))
    return World.get_pose_from_transform(shifted)
def __init__(self, parent):
    """Build the Game of Life window: grid, control buttons, status labels."""
    Tk.__init__(self, parent)
    self.parent = parent
    self.title("Game of Life")
    # Tk variables so the labels update dynamically.
    self.gridSize = StringVar()
    self.generationLabel = StringVar()
    self.autoGenerating = False
    # Default grid size 25.
    self.world = World(self, 25)
    self.world.grid(row=0, column=0)
    # Seed the labels and the initial cell grid.
    self.gridSize.set("25x25")
    self.generationLabel.set("0")
    self.world.generateCells()
    # Row of action controls under the grid.
    actions = Frame(self)
    actions.grid(row=1, column=0, pady=10)
    btn_randomize = Button(actions, text="Randomize", command=self.randomizeGen)
    btn_randomize.grid(row=0, column=0, padx=10)
    btn_clear = Button(actions, text="Clear", command=self.clearGrid)
    btn_clear.grid(row=0, column=1, padx=10)
    btn_start = Button(actions, text="Start", command=self.startAutoGeneration)
    btn_start.grid(row=0, column=2, padx=10)
    btn_stop = Button(actions, text="Stop", command=self.stopAutoGeneration)
    btn_stop.grid(row=0, column=3, padx=10)
    btn_next = Button(actions, text="Next Generation", command=self.nextGeneration)
    btn_next.grid(row=0, column=4, padx=10)
    lbl_generation = Label(actions, textvariable=self.generationLabel)
    lbl_generation.grid(row=0, column=5, padx=10)
    btn_smaller = Button(actions, text="-", command=self.decreaseGrid)
    btn_smaller.grid(row=0, column=6, ipadx=5, padx=10)
    btn_bigger = Button(actions, text="+", command=self.increaseGrid)
    btn_bigger.grid(row=0, column=7, ipadx=5, padx=10)
    lbl_size = Label(actions, textvariable=self.gridSize)
    lbl_size.grid(row=0, column=8)
    # Enter the Tk event loop (blocks until the window closes).
    mainloop()
def _make_gripper_marker(self, control, is_hand_open=False):
    '''Makes a gripper marker.

    Builds five mesh markers (palm plus two finger/fingertip chains) posed
    to show the PR2 gripper open (28 degrees) or closed, appending them to
    the given interactive-marker control.
    '''
    if is_hand_open:
        angle = 28 * numpy.pi / 180.0
    else:
        angle = 0
    # Upper finger chain: proximal joint rotated by `angle`, distal by the
    # opposite angle; the translations are the finger link offsets.
    transform1 = tf.transformations.euler_matrix(0, 0, angle)
    transform1[:3, 3] = [0.07691 - ActionStepMarker._offset, 0.01, 0]
    transform2 = tf.transformations.euler_matrix(0, 0, -angle)
    transform2[:3, 3] = [0.09137, 0.00495, 0]
    t_proximal = transform1
    t_distal = tf.transformations.concatenate_matrices(
        transform1, transform2)
    # Palm mesh.
    mesh1 = self._make_mesh_marker()
    mesh1.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/gripper_palm.dae')
    mesh1.pose.position.x = -ActionStepMarker._offset
    mesh1.pose.orientation.w = 1
    # Upper finger and fingertip meshes.
    mesh2 = self._make_mesh_marker()
    mesh2.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger.dae')
    mesh2.pose = World.get_pose_from_transform(t_proximal)
    mesh3 = self._make_mesh_marker()
    mesh3.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger_tip.dae')
    mesh3.pose = World.get_pose_from_transform(t_distal)
    # Lower finger chain: mirrored with a pi roll, then the same
    # proximal/distal offsets as the upper finger.
    quat = tf.transformations.quaternion_multiply(
        tf.transformations.quaternion_from_euler(numpy.pi, 0, 0),
        tf.transformations.quaternion_from_euler(0, 0, angle))
    transform1 = tf.transformations.quaternion_matrix(quat)
    transform1[:3, 3] = [0.07691 - ActionStepMarker._offset, -0.01, 0]
    transform2 = tf.transformations.euler_matrix(0, 0, -angle)
    transform2[:3, 3] = [0.09137, 0.00495, 0]
    t_proximal = transform1
    t_distal = tf.transformations.concatenate_matrices(
        transform1, transform2)
    # Lower finger and fingertip meshes.
    mesh4 = self._make_mesh_marker()
    mesh4.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger.dae')
    mesh4.pose = World.get_pose_from_transform(t_proximal)
    mesh5 = self._make_mesh_marker()
    mesh5.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger_tip.dae')
    mesh5.pose = World.get_pose_from_transform(t_distal)
    control.markers.append(mesh1)
    control.markers.append(mesh2)
    control.markers.append(mesh3)
    control.markers.append(mesh4)
    control.markers.append(mesh5)
    return control
def _make_gripper_marker(self, control, is_hand_open=False):
    '''Makes a gripper marker.

    Appends five mesh markers to the control: the palm and two
    finger/fingertip chains, posed for an open (28 degree) or closed hand.
    '''
    if is_hand_open:
        angle = 28 * numpy.pi / 180.0
    else:
        angle = 0
    # First finger chain: proximal rotation by `angle`, distal by -angle;
    # translations are the PR2 finger link offsets.
    transform1 = tf.transformations.euler_matrix(0, 0, angle)
    transform1[:3, 3] = [0.07691 - ActionStepMarker._offset, 0.01, 0]
    transform2 = tf.transformations.euler_matrix(0, 0, -angle)
    transform2[:3, 3] = [0.09137, 0.00495, 0]
    t_proximal = transform1
    t_distal = tf.transformations.concatenate_matrices(transform1,
                                                       transform2)
    # Palm mesh.
    mesh1 = self._make_mesh_marker()
    mesh1.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/gripper_palm.dae')
    mesh1.pose.position.x = -ActionStepMarker._offset
    mesh1.pose.orientation.w = 1
    # First finger and fingertip meshes.
    mesh2 = self._make_mesh_marker()
    mesh2.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger.dae')
    mesh2.pose = World.get_pose_from_transform(t_proximal)
    mesh3 = self._make_mesh_marker()
    mesh3.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger_tip.dae')
    mesh3.pose = World.get_pose_from_transform(t_distal)
    # Second finger chain: mirrored via a pi roll before the same offsets.
    quat = tf.transformations.quaternion_multiply(
        tf.transformations.quaternion_from_euler(numpy.pi, 0, 0),
        tf.transformations.quaternion_from_euler(0, 0, angle))
    transform1 = tf.transformations.quaternion_matrix(quat)
    transform1[:3, 3] = [0.07691 - ActionStepMarker._offset, -0.01, 0]
    transform2 = tf.transformations.euler_matrix(0, 0, -angle)
    transform2[:3, 3] = [0.09137, 0.00495, 0]
    t_proximal = transform1
    t_distal = tf.transformations.concatenate_matrices(transform1,
                                                       transform2)
    # Second finger and fingertip meshes.
    mesh4 = self._make_mesh_marker()
    mesh4.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger.dae')
    mesh4.pose = World.get_pose_from_transform(t_proximal)
    mesh5 = self._make_mesh_marker()
    mesh5.mesh_resource = ('package://pr2_description/meshes/' +
                           'gripper_v0/l_finger_tip.dae')
    mesh5.pose = World.get_pose_from_transform(t_distal)
    control.markers.append(mesh1)
    control.markers.append(mesh2)
    control.markers.append(mesh3)
    control.markers.append(mesh4)
    control.markers.append(mesh5)
    return control
def __init__(self):
    """Wire up input, display, entity tracking, the player and the world."""
    print("Press Ctrl-C to quit")
    self.gamepad = GamePad()
    self.screen = Screen()
    self.entityManager = EntityManager()
    self.player = Player(self)
    self.world = World(self)
    # The player is itself an entity and must be tracked by the manager.
    self.entityManager.add(self.player)
def test_victory_message_p2_wins(add_to_screen_mock):
    """A 4-5 score should render the Player 2 victory banner."""
    world = World()
    world.p1score, world.p2score = 4, 5
    screen = MagicMock()
    Text().victory_message(world, screen)
    # The banner must be drawn at (640, 320) in size 100.
    add_to_screen_mock.assert_any_call(screen, 100, "The winner is Player 2!", 640, 320)
def step_impl(context, element_name, value):
    """Verify that the element's class attribute contains the given token."""
    expected = world.replace_variables(value)
    element = world.find_element(element_name)
    classes = element.get_attribute('class')
    if expected in classes:
        return True
    log.failed("Verify class contains?", classes, expected)
def step_impl(context, element_name, value):
    """Verify that the element's placeholder text equals the given value."""
    expected = world.replace_variables(value)
    element = world.find_element(element_name)
    placeholder = element.get_attribute('placeholder')
    if placeholder != expected:
        log.failed("Verify placeholder text is?", placeholder, expected)
    else:
        return True
def __init__(self, interactive=False, delay=100):
    """Create the AmoebaWorld window; add a control panel if interactive."""
    World.__init__(self)
    self.delay = delay
    self.title('AmoebaWorld')
    # Simulation starts paused.
    self.running = False
    self.make_canvas()
    if interactive:
        self.make_control_panel()
def base_experiment(init_plants):
    """Run the base 1000-step plant simulation for `init_plants` seeds.

    Saves world snapshots (dense early, then every 100 steps), the final
    world image, and population/convergence/fitness curves (png + npy)
    under the per-seed-count experiment directory.
    """
    global fig_number
    # NOTE(review): hard-coded per-user output directory.
    loc = "/Users/sasha/desktop/plant_data/base_experiment"
    world = World(init_plants)
    toimage(world.ideal_world).save(
        loc + "/{0}/ideal_world_{1}.png".format(init_plants, init_plants))
    for i in range(1000):
        # Snapshot every 10 steps during the first 100 steps...
        if i < 100 and i % 10 == 0:
            toimage(
                world.color_world).save(loc + "/{0}/time_{1}_world_{2}.png".format(
                    init_plants, i, init_plants))
        # ...then every 100 steps (step 0 matches both branches; the second
        # save just overwrites the same file).
        if i % 100 == 0:
            toimage(
                world.color_world).save(loc + "/{0}/time_{1}_world_{2}.png".format(
                    init_plants, i, init_plants))
        world.env_step()
    toimage(world.color_world).save(
        loc + "/{0}/final_world_{1}.png".format(init_plants, init_plants))
    # Population growth curve (figure + raw data).
    fig_number += 1
    plt.figure(fig_number)
    plt.title("Population Growth")
    plt.xlabel("Time")
    plt.ylabel("Plants Alive")
    plt.plot(world.spread_progress)
    plt.savefig(
        loc + "/{0}/population_progress_{1}.png".format(init_plants, init_plants))
    np.save(
        loc + "/{0}/population_progress_{1}".format(init_plants, init_plants),
        world.spread_progress)
    # Convergence curve (figure + raw data).
    fig_number += 1
    plt.figure(fig_number)
    plt.title("Convergence Progress")
    plt.xlabel("Time")
    plt.ylabel("Percentage")
    plt.plot(world.convergence_progress)
    plt.savefig(
        loc + "/{0}/convergence_progress_{1}.png".format(init_plants, init_plants))
    np.save(
        loc + "/{0}/convergence_progress_{1}".format(init_plants, init_plants),
        world.convergence_progress)
    # Fitness curve (figure + raw data).
    fig_number += 1
    plt.figure(fig_number)
    plt.title("Fitness Progress")
    plt.xlabel("Time")
    plt.ylabel("Survival Probability")
    plt.plot(world.fitness_progress)
    plt.savefig(
        loc + "/{0}/fitness_progress_{1}.png".format(init_plants, init_plants))
    np.save(loc + "/{0}/fitness_progress_{1}".format(init_plants, init_plants),
            world.fitness_progress)
def __init__(self, parentFSM, doneEvent):
    """Register the MGHood/CTHood states on this world's FSM."""
    World.__init__(self, doneEvent)
    self.fsm.setName(CIGlobals.OToontown)
    # Both hoods can only be entered/left through the quiet zone.
    self.fsm.addState(State('MGHood', self.enterMGHood,
                            self.exitMGHood, ['quietZone']))
    self.fsm.addState(State('CTHood', self.enterCTHood,
                            self.exitCTHood, ['quietZone']))
    self.fsm.getStateNamed('quietZone').addTransition('MGHood')
    self.fsm.getStateNamed('quietZone').addTransition('CTHood')
    self.parentFSM = parentFSM
    # Attach this world's FSM beneath the parent's OToontown state.
    self.parentFSM.getStateNamed(CIGlobals.OToontown).addChild(self.fsm)
def init(host='localhost',server=None): """ Most of this code is copied from init() function in client.py Game initialization function. 1. Creates a L{Debugger} (debugger) and L{EventTimer} (eventTimer) and stores references to them in an instance of L{EventManager}. 2. Several listeners are started and registered with eventManager: - Instance of L{Universe} (universe) - Instance of L{UserInterface} (ui) - Instance of L{Window} (gameWindow) 3. We send the eventManager a message to start the game - This message is interpreted by the gameWindow """ #Creates a Debugger that posts events to the terminal for debugging purposes debugger = Debugger() eventTimer = EventTimer() #Create the event manager for low-level events eventManager = Manager(eventTimer,debugger) #FIXME: more specific manager\ Entity.manager = eventManager # World w is set to the activeWorld of the universe universe = Universe(eventManager) ui = UserInterface(eventManager,universe.activeWorld,'BROADCASTSERVER') gameWindow = Window(eventManager,width=1024,height=768) gameWindow.fullscreenMode = False gameWindow.updateScreenMode() w = World(universe) s.world=w networked = True client = GameClient(eventManager,host=s.host,port=1567) #wait until the client is assigned an ID before proceeding while client.ID == None: import time time.sleep(.02) print 'Got an ID',client.ID clientID = client.ID ui.setClientID(clientID) wManipulator = WorldManipulator(eventManager,w,networked,gameClientID = clientID) #generate the resources in the server, the existance of these #resources will propogate through to every client when they connect w._generateResources() #Notify the manager that the window should start to accept input: eventManager.post(Event.StartEvent()) return eventManager.eventTypesToListeners
def setup_class(self):
    """Create three towns, a player trader, and the world they share."""
    town_specs = [('Los Mantos', (-2, 0), 'Coastal'),
                  ('Kallac', (1, 1), 'Karst'),
                  ('Vinas', (2, -2), 'Hilly')]
    self.towns = [Town(name, pos, 500, [10, 10], terrain, [])
                  for name, pos, terrain in town_specs]
    self.town1, self.town2, self.town3 = self.towns
    self.player = Trader("The player", [1, 1], [0, 0], 15000, [], [])
    self.traders = [self.player]
    self.world = World(0, self.towns, self.traders)
def step_impl(context, value):
    """Save a screenshot of the current page to the given .png path.

    The original tested `value[:-4]` — everything *except* the last four
    characters — against '.png', so the suffix check could never match
    and '.png' was appended even when already present.
    """
    value = world.replace_variables(value)
    if not value.lower().endswith('.png'):
        value = value + '.png'
    # Create the target directory if the path contains one.
    folder = os.path.dirname(value)
    if folder and not os.path.exists(folder):
        os.makedirs(folder)
    if not world.save_current_page_screenshot(value):
        log.failed("Could not save a screenshot.")
def test_update_ball_in_rim(reset_mock, check_for_collision_mock, ball_state, scored):
    """update() must tick the ball, check collisions once, and not reset."""
    world = World()
    world.ball.update = MagicMock()
    world.ball.state = ball_state
    world.update(0.1, 50)
    # One collision check and one ball tick with the frame delta.
    check_for_collision_mock.assert_called_once()
    world.ball.update.assert_called_once_with(0.1)
    # The world must not be reset mid-play; the score flag must match.
    reset_mock.assert_not_called()
    assert world.scored == scored
def __init__(self):
    """Create the tkinter frame/canvas pair and the matching game world."""
    self.width, self.height = GAME_WIDTH, GAME_HEIGHT
    self.frame = tkinter.Tk()
    self.canvas = tkinter.Canvas(self.frame, width=self.width,
                                 height=self.height)
    # Game world sized to match the canvas.
    self.world = World(self.width, self.height)
    # Pack the canvas into the frame.
    self.canvas.pack()
def step_impl(context, element_name, value):
    """Verify that the element's inline style attribute contains value.

    Removed the stray `print styles` left over from debugging — it was a
    Python-2 print statement, which is a syntax error under Python 3.
    """
    value = world.replace_variables(value)
    element = world.find_element(element_name)
    styles = element.get_attribute('style')
    if value in styles:
        return True
    else:
        log.failed("Verify style contains?", styles, value)
def __init__(self, r=1.0, s=1.0, v=1.0, cloudsize=1.0):
    """Initialize the simulation World and its Tk window chrome."""
    World.__init__(self, r, s, v, cloudsize)
    Tk.__init__(self)
    self.title("World")
    self.iconname("World")
    # Main container frame fills the window.
    self.frame = Frame(self)
    self.frame.pack(side=TOP, expand=YES, fill=BOTH)
    # Build the menu, the drawing window, and the options entry widgets.
    self.setupMenu()
    self.setupWindow()
    self.setupOptionsEntry()
def __init__(self, canvas_size=500, cell_size=10, interactive=False):
    """Set up the GIS world; build the canvas and controls when interactive."""
    World.__init__(self)
    self.title('GIS Modling and Problem Solving Final Project')
    self.canvas_size = canvas_size
    self.cell_size = cell_size
    # Sparse cell store, keyed lazily as cells are created.
    self.cells = {}
    if interactive:
        self.make_canvas()
        self.make_control()
def __init__(self, grid = None, numAgents = None, numTargets = None,
             policyOption = None, modelRepr = None, initState = None,
             checkpoints = None, maxSteps = None, horizon = None,
             discount = None):
    """Build the pursuit model: world, actions, states, functions, belief.

    Every argument is optional; None selects the default (one agent, one
    target, RTBSS_CHECKPOINT policy, SYSTEM representation, horizon 1,
    discount 0.95, 20 max steps per agent).
    """
    self.world = World(grid)
    # Fall back to documented defaults for any omitted parameter.
    self.numAgents = 1 if numAgents == None else numAgents
    self.numTargets = 1 if numTargets == None else numTargets
    self.policyOption = Model.RTBSS_CHECKPOINT if policyOption == None else policyOption
    self.modelRepr = Model.SYSTEM if modelRepr == None else modelRepr
    self.state = self.__getDefaultInitState() if initState == None else self.getInitState(initState)
    self.checkpoints = self.__getDefaultCheckpoints() if checkpoints == None else self.getCheckpoints(checkpoints)
    self.checkpointIndeces = [0 for i in range(self.numAgents)]
    self.DEFAULT_MAX_STEPS = [20 for i in range(self.numAgents)]
    self.maxSteps = self.__getDefaultMaxSteps() if maxSteps == None else maxSteps
    self.horizon = 1 if horizon == None else horizon
    self.discount = 0.95 if discount == None else discount
    self.reward = 0
    # Initialize sets of all actions (stay + 4-neighborhood moves).
    self.agentActionLabels = ["stay", "go N", "go E", "go S", "go W"]
    self.agentActions = [(0, 0), (-1, 0), (0, 1), (1, 0), (0, -1)]
    self.targetActionLabels = ["stay", "go N", "go E", "go S", "go W"]
    self.targetActions = [(0, 0), (-1, 0), (0, 1), (1, 0), (0, -1)]
    #self.targetActions = [(0, 0), (-1, 0), (0, 1), (1, 0), (0, -1), (-2, 0), (0, 2), (2, 0), (0, -2)]
    #self.targetActions = [(0, 0), (-1, 0), (0, 1), (1, 0), (0, -1), (-2, 0), (0, 2), (2, 0), (0, -2), (-3, 0), (0, 3), (3, 0), (0, -3)]
    #self.targetActions = [(0, 0)]
    # Joint (compound) actions: cartesian power over all agents/targets.
    self.actions = Util.cartesianPower(self.agentActions, self.numAgents) #Util.getOrderedCombinations(self.agentActions, self.numAgents)
    self.targetCompoundActions = Util.cartesianPower(self.targetActions, self.numTargets) #Util.getOrderedCombinations(self.targetActions, self.numTargets)
    # Initialize lists of all states
    self.robotStates = self.world.robotStates
    self.agentCompoundStates = []
    self.targetCompoundStates = []
    self.states = []
    self.__initStates()
    # Initialize transition, observation and reward functions
    self.transitionFcn = TransitionFunction(self)
    self.observationFcn = ObservationFunction(self)
    self.agentObservations = [frozenset([robotState]) for robotState in self.robotStates]
    self.observations = self.observationFcn.getObservations()
    self.rewardFcn = self.observationFcn.getRewardFunction()  # Reward function is generated upon observation function init
    self.observation = frozenset()  # Last set of observations
    self.observationList = []  # Last observation as enumerated set
    self.ambiguousObservation = []  # Last subset of observation that is ambiguous
    if self.modelRepr == Model.DECENTRALIZED:
        # Decentralized representation additionally tracks partial rewards.
        self.partialRewardFcn = dict()
        self.__initPartialRewardFcn()
    # Initialize belief
    self.belief = dict()
    self.__initBelief()
    # Initialize policy solver
    self.solver = Solver(self, self.discount, self.horizon)
    # If results should be printed in terminal
    self.doPrint = False
def __init__(self, interactive=False):
    """Create the TurtleWorld window; add the interactive shell on request."""
    World.__init__(self)
    self.title("TurtleWorld")
    # The interpreter executes user-provided code.
    self.make_interpreter(globals())
    # Build the GUI.
    self.setup()
    if interactive:
        self.setup_interactive()
def step_impl(context, value):
    """Verify the text of the currently displayed popup alert."""
    expected = world.replace_variables(value)
    alert = world.get_alert_when_exist()
    # No alert at all is its own failure.
    if alert is None:
        log.failed("The popup alert not visible")
    if alert.text != expected:
        log.failed("Verify popup message is?", alert.text, expected)
    else:
        return True
def startNewSim(self,simParam):
    """Stop any running simulation and launch a new one on a worker thread.

    Builds the World from simParam, keeps a deep-copied static snapshot
    for the GUI, seeds the dynamic-state buffer with tick 0, then starts
    the simulate() thread.
    """
    self.stopSimulation()
    with self.lock:
        self.thread_exit = False
        self.isRunning = True
        # NOTE(review): Queue.Queue and time.clock are Python-2-era APIs
        # (queue.Queue / time.perf_counter in Python 3).
        self.worldBuffer = Queue.Queue()
        world = World(simParam)  # starts the world
        self.staticWorld = copy.deepcopy(world)  # static copy to merge and send to GUI
        # Puts dynamic state in the first spot (tick 0).
        self.worldBuffer.put((0,world.copyDynamicState()))
        # Runs the simulation on its own thread.
        self.simThread = threading.Thread(target=self.simulate, args=(world, 0, 1))
        self.runTimeS = time.clock()
        self.simThread.start()
def step_impl(context, value): value = world.replace_variables(value) if not value[:-5].lower() == '.html': value = value + '.html' folder = os.path.dirname(value) if not folder == '': if not os.path.exists(folder): os.makedirs(folder) filename = open(value + '.html','w') page_source = world.get_current_page_source() filename.write(page_source.encode('utf8')) filename.close()
def step_impl(context, element_name, value): value = world.replace_variables(value) element = world.find_element(element_name) if element.value |more_than| value: return True else: log.failed( "Verify value is more than?", "value = %s" % element.value, "value > %s" % value )
def step_impl(context, element_name, value): value = world.replace_variables(value) element = world.find_element(element_name) if element.value |less_than_or_equal| value: return True else: log.failed( "Verify value is less than or equal?", "value = %s" % element.value, "value <= %s" % value )
def step_impl(context, element_name1, element_name2): element1 = world.find_element(element_name1) element2 = world.find_element(element_name2) if element1.value |more_than| element2.value: return True else: log.failed( "Verify element1 value is greater than element2 value?", "element1 value = %s , element2 value = %s" % (element1.value, element2.value), "element1 value > element2 value" )
def step_impl(context, element_name1, element_name2): element1 = world.find_element(element_name1) element2 = world.find_element(element_name2) if element1.value |less_than_or_equal| element2.value: return True else: log.failed( "Verify element1 value is less than or equal to element2 value?", "element1 value = %s , element2 value = %s" % (element1.value, element2.value), "element1 value <= element2 value" )
def extract_startsize(a): beetype, formation, formnumber, seed = a w = World(beetype, len(formation), len(formation)*2, len(formation)*2, {"seed":0, "formation":formation}, seed, True) w.stepForward() i = 0 return [seed, beetype.__name__, formnumber, w.sizeOfWorld]
def __init__(self, canvas_size=500, cell_size=5, interactive=False): World.__init__(self) self.title('CellWorld') self.canvas_size = canvas_size self.cell_size = cell_size # cells is a map from index tuples to Cell objects self.cells = {} if interactive: self.make_canvas() self.make_control()
def before_feature(cls, context, feature): if hasattr(context.config, "browser_size") and context.config.browser_size is not None: name = context.config.browser_size size = Responsive.get_browser_size(name) world.size = size ## setup browser browser = "firefox" if hasattr(context.config, "browser") and context.config.browser is not None: browser = context.config.browser world.open_browser(browser) Logger.driver = world.driver
def __init__(self, interactive=False): World.__init__(self) self.title('TurtleWorld') # the interpreter executes user-provided code g = globals() g['world'] = self self.make_interpreter(g) # make the GUI self.setup() if interactive: self.setup_interactive()
def Simulation(fileName): chronicles = Chronicles("Test Simulation", "Eckhart Arnold", "A Test of the 'Augmented Experiment' Prototype with a simulation.") world = World(chronicles) agents = [Random() for i in range(2)] + \ [ModerateEgoist() for i in range(10)] + \ [EgoistPunisher() for i in range(10)] + \ [SimpleHeuristics() for i in range(10)] + \ [SimpleHeuristicsPunisher() for i in range(10)] world.setup(agents, PublicGoodsGame(1.6), 30, 20, 20) world.run() #report = Report(chronicles) with open(fileName, "w") as f: f.write(chronicles.toJSON())
def _get_arm_states(self): '''Returns the current arms states in the right format''' abs_ee_poses = [Arms.get_ee_state(0), Arms.get_ee_state(1)] joint_poses = [Arms.get_joint_state(0), Arms.get_joint_state(1)] states = [None, None] for arm_index in [0, 1]: nearest_obj = self.world.get_nearest_object( abs_ee_poses[arm_index]) if (nearest_obj == None): states[arm_index] = ArmState(ArmState.ROBOT_BASE, abs_ee_poses[arm_index], joint_poses[arm_index], Object()) else: # Relative rel_ee_pose = World.transform( abs_ee_poses[arm_index], 'base_link', nearest_obj.name) states[arm_index] = ArmState(ArmState.OBJECT, rel_ee_pose, joint_poses[arm_index], nearest_obj) return states