def custom_init(self, nart):
    """Create the world + base camp."""
    world = worlds.World()
    nart.camp.contents.append(world)
    self.register_element("WORLD", world)
    self.chapter = Chapter(end_rank=0, world=world)
    self.add_first_locale_sub_plot(nart)

    # Determine the chapter sizes.
    min_dungeon_size = 2 if nart.end_rank < 7 else 3
    # NOTE(review): num_dungeon/extra_points are never used below --
    # presumably scaffolding for the planned steps; confirm before removing.
    num_dungeon, extra_points = divmod(nart.end_rank, min_dungeon_size)

    # Step One- Generate a plot sequence, starting at the end and moving
    # backwards to the beginning.
    # Step Two- Moving forward through the plot, connect the plot points.
    # Step Three- Add resources and whatnot.
    for job in characters.PC_CLASSES:
        self.add_sub_plot(nart, "RESOURCE_JOBTRAINER",
                          PlotState(elements={"JOB": job}))
    return True
def test_02_world_init(self):
    """World grid starts all blank, then gains land ('X') and blocked ('#')
    cells after build_random() is called."""
    num_seeds = 6      # number of seed points to start land generation
    perc_land = 20     # % of world that is land
    perc_sea = 80      # % of world that is sea
    perc_blocked = 4   # % of world that is blocked
    myWorld = worlds.World(22, 44, ['O', 'P', '@'])  # TODO - fix passing
    # First grid row is 44 blank cells.
    self.assertEqual(str(myWorld.grd)[0:44], '.' * 44)
    # Use assertIn/assertNotIn rather than assertEqual(x in y, True/False).
    self.assertIn('.', str(myWorld.grd))
    self.assertNotIn('X', str(myWorld.grd))  # should be all blank before we build
    self.assertNotIn('#', str(myWorld.grd))  # the world using build_random
    myWorld.build_random(num_seeds, perc_land, perc_sea, perc_blocked)
    self.assertEqual(myWorld.grd.get_grid_width(), 44)
    self.assertEqual(myWorld.grd.get_grid_height(), 22)
    self.assertIn('.', str(myWorld.grd))
    self.assertIn('X', str(myWorld.grd))
    self.assertIn('#', str(myWorld.grd))
def test_03_mountains(self):
    """Build a random world, then layer mountains onto it (twice)."""
    world = worlds.World(20, 70, ['A', 'B', '@'])
    world.build_random(5, 15, 85, 4)
    self.assertEqual(world.grd.get_grid_width(), 70)
    self.assertEqual(world.grd.get_grid_height(), 20)
    world.add_mountains()
    world.add_new_seed()
    world.add_mountains()
    print(world)
def build_world(height, width):
    """Generate a random world of the given size, save its grid to
    'test_world.txt', and return it."""
    seeds = 6        # number of seed points to start land generation
    land_pct = 20    # % of world that is land
    sea_pct = 80     # % of world that is sea
    blocked_pct = 4  # % of world that is blocked
    world = my_world.World(height, width, ['.', 'X', '#'])  # TODO - fix passing
    world.build_random(seeds, land_pct, sea_pct, blocked_pct)
    world.grd.save('test_world.txt')
    return world
def build_base(self):
    """Create a base random land structure using the AIKIF world model."""
    # Derive terrain percentages from the planet's physical attributes.
    # NOTE(review): for large lava/wind/rain/sun values perc_land can exceed
    # 100, driving perc_sea negative -- confirm build_random tolerates that.
    perc_land = (self.lava + (self.wind/10) + (self.rain/20) + (self.sun/10))*100
    perc_sea = (100 - perc_land)
    perc_blocked = (self.lava/10)*100
    self.world = my_world.World(self.grid_height, self.grid_width, [' ', 'x', '#'])
    self.world.build_random(self.num_seeds, perc_land, perc_sea, perc_blocked)
def custom_init(self, nart):
    """Create the world + starting scene."""
    world = worlds.World()
    nart.camp.contents.append(world)
    self.register_element("WORLD", world)
    self.chapter = Chapter(end_rank=10, world=world)
    # Default the setting when the subclass didn't choose one.
    if not self.setting:
        self.setting = context.SET_RENFAN
    self.add_first_locale_sub_plot(nart, locale_type="ETERNAL_CITY")
    # One job trainer resource per playable class.
    for pc_job in characters.PC_CLASSES:
        self.add_sub_plot(nart, "RESOURCE_JOBTRAINER",
                          PlotState(elements={"JOB": pc_job}))
    return True
def test_05_run_aikif_agent(self):
    """
    recreates agent list as per world_generator and puts them
    in a world. NOTE - uses World object, not planet
    """
    # Removed unused `math`/`randint` imports and the commented-out
    # target-coordinate computation they supported.
    import aikif.agents.explore.agent_explore_grid as agt
    log_folder = os.path.join(os.getcwd(), 'test_results')
    myWorld = worlds.World(70, 20, ['.', 'X', '#'])
    target_coords = myWorld.pick_random_target()
    agt_list = []
    for agt_num in range(4):
        ag = agt.ExploreAgent('exploring_agent' + str(agt_num), log_folder, False, 1)
        start_y, start_x = myWorld.grd.find_safe_starting_point()
        ag.set_world(myWorld.grd, [start_y, start_x],
                     [target_coords[0], target_coords[1]])
        agt_list.append(ag)
    sim = worlds.WorldSimulation(myWorld, agt_list, 1)
    sim.run(9, 'Y', log_folder + os.sep)
    self.assertTrue(len(str(sim)) > 10)
    # Coordinates should not already equal the sentinel values we set below.
    self.assertNotEqual(agt_list[0].current_x, 4545)
    self.assertNotEqual(agt_list[0].current_y, 9895)
    agt_list[0].current_x = 4545
    agt_list[0].current_y = 9895
    self.assertEqual(agt_list[0].current_x, 4545)
    self.assertEqual(agt_list[0].current_y, 9895)
def custom_init(self, nart):
    """Create the world + chapter + city, then load the INTRO_1 subplot."""
    world = worlds.World()
    nart.camp.contents.append(world)
    self.register_element("WORLD", world)
    self.chapter = Chapter(world=world)
    self.add_first_locale_sub_plot(nart)
    intro = self.add_sub_plot(nart, "INTRO_1")
    for job in characters.PC_CLASSES:
        self.add_sub_plot(nart, "RESOURCE_JOBTRAINER",
                          PlotState(elements={"JOB": job}))
    # Optional test plot branching off the intro.
    self.add_sub_plot(nart, "TESTPLOT",
                      spstate=PlotState().based_on(intro), necessary=False)
    return True
def custom_init(self, nart):
    """Create the world + starting scene."""
    world = worlds.World()
    nart.camp.contents.append(world)
    self.register_element("WORLD", world)
    self.chapter = Chapter(end_rank=0, world=world)
    if not self.setting:
        self.setting = context.SET_RENFAN
    self.add_first_locale_sub_plot(nart)

    # Determine the dungeon sizes.
    min_dungeon_size = 2 if nart.end_rank < 7 else 3
    num_dungeon, extra_points = divmod(nart.end_rank, min_dungeon_size)

    # Add dungeons/chapters sequentially, each following the previous one.
    last_chapter = self.chapter
    last_subplot = self
    for dungeon_num in range(num_dungeon):
        chapter = Chapter(follows=last_chapter)
        chapter.end_rank = chapter.start_rank + min_dungeon_size - 1
        if dungeon_num == num_dungeon - 1:
            # Fold any leftover rank points into the final dungeon.
            chapter.end_rank += extra_points
        last_subplot = self.add_sub_plot(
            nart, "BARDIC_DUNGEON",
            PlotState(chapter=chapter).based_on(last_subplot))
        last_chapter = chapter

    # At this point, we can add the conclusion.
    self.add_sub_plot(nart, "BARDIC_CONCLUSION",
                      PlotState(rank=nart.end_rank).based_on(last_subplot))

    for job in characters.PC_CLASSES:
        self.add_sub_plot(nart, "RESOURCE_JOBTRAINER",
                          PlotState(elements={"JOB": job}))
    return True
def test_04_verify_agents(self):
    """
    check that a failed verify agent works
    simulator.Simulator('BadSim', name, world, agents, agent_locations, actions)
    """
    world = worlds.World(40, 20, ['.', 'X', '#'])
    world.build_random(8, 40, 70, 30)
    agt1 = mod_agt.Agent(name='agt_9001', fldr=os.getcwd())
    agt2 = mod_agt.Agent(name='agt_9002', fldr=os.getcwd())
    agt1.set_coords({'x': 2, 'y': 1, 'z': 0, 't': 0})
    agt2.set_coords({'x': 3, 'y': 4, 'z': 0, 't': 0})
    agents = [agt1, agt2]
    actions = ['walk']
    sim = simulator.Simulator('sim04', world, agents, actions)
    # Distinct agent names -> verification succeeds.
    self.assertTrue(sim._verify_agents())
    # A duplicate agent name must make verification fail.
    agents.append(agt1)
    self.assertFalse(sim._verify_agents())
def q_3_4(world_type, eta, n_time_steps):
    """Run a weighted-majority learner against three experts for
    n_time_steps rounds, then plot cumulative loss and regret.

    world_type -- forwarded to worlds.World; selects how labels are generated
    eta -- learning rate for the multiplicative weight update
    n_time_steps -- number of prediction rounds
    """
    world = worlds.World(world_type=world_type)
    weights = np.ones(3)        # one weight per expert, all start equal
    loss_experts = np.zeros(3)  # per-round loss of each expert
    # list of lists
    cumulative_loss_learner = np.zeros(n_time_steps)
    sum_loss_experts = np.zeros(3)
    cumulative_loss_experts = [[0 for i in range (3)] for j in range(n_time_steps)]
    sum_loss_learner = 0
    for time_step in range(n_time_steps):
        # Collect this round's prediction from each expert.
        expert_pred = [expert_1(time_step), \
                       expert_2(time_step), \
                       expert_3(time_step)]
        y_pred = weighted_majority(expert_pred, weights)
        # check implementation in worlds.py
        # NOTE(review): the world sees the predictions/weights, so labels may
        # be adversarial depending on world_type -- confirm in worlds.py.
        y_label = world.get_label(time_step=time_step, expert_pred=expert_pred, expert_weights=weights)
        # update weights
        # todo vectorize
        loss_learner = calculate_loss(y_pred, y_label)
        sum_loss_learner = sum_loss_learner + loss_learner
        cumulative_loss_learner[time_step] = sum_loss_learner
        # loss_experts = [calculate_loss(pred, y_label) for pred in expert_pred]
        for i in range(3):
            # Multiplicative update: shrink weight i only when expert i is
            # wrong (the boolean expert_pred[i] != y_label acts as 0/1).
            weights[i] = weights[i]*(1-(eta*(expert_pred[i]!=y_label)))
            loss_experts[i] = calculate_loss(expert_pred[i], y_label)
            sum_loss_experts[i] = sum_loss_experts[i] + loss_experts[i]
            cumulative_loss_experts[time_step][i] = sum_loss_experts[i]
    plot_loss(cumulative_loss_learner, cumulative_loss_experts)
    plot_regret(cumulative_loss_learner, cumulative_loss_experts, n_time_steps)
    plt.show()