Esempio n. 1
0
    def frame_step(self, action):
        """Advance the simulation one frame for the given action.

        Action 0 turns right, 1 turns left; anything else keeps the
        current heading. Steps the physics, redraws, reads the sensors,
        and returns (reward, state).
        """
        # On the default map the cat moves once every five frames.
        if self.map_style == 'default':
            if self.num_steps % 5 == 0:
                self.move_cat()

        # Apply the steering action and remember whether we turned.
        turning = action in (0, 1)
        if action == 0:
            self.car_body.angle -= .05  # turn right
        elif action == 1:
            self.car_body.angle += .05  # turn left

        # Drive forward at constant speed along the current heading.
        self.driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 50 * self.driving_direction

        # Render and advance the physics simulation.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1. / 10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Refresh the sensor object at the car's current pose.
        x, y = self.car_body.position
        self.sensor_obj.set_readings(x, y, self.car_body.angle)
        proximity_sensors = self.sensor_obj.get_readings()

        # Index 2 is the middle sonar; the first two readings remain
        # the proximity sensors used for the reward.
        forward_sonar = proximity_sensors[2]
        proximity_sensors = proximity_sensors[0:2]

        # State = sonar sweep readings plus the middle sonar.
        sonar_sweep = self.sensor_obj.get_sonar_sweep_readings()
        state = sonar_sweep + [forward_sonar]

        # Reward depends on proximity readings and whether we turned.
        reward = self.get_reward(proximity_sensors, turning)

        if show_sensors:
            pygame.display.update()

        state = np.array([state])
        self.num_steps += 1

        return reward, state
Esempio n. 2
0
 def frame_step(self,action):
     """Advance the plane one frame for the given action.

     Actions 0/1 nudge the heading right/left by 0.2 rad; any other
     action snaps the heading to point straight at the target body.
     Returns (reward, state, target) where ``target`` is True when a
     target reading was seen this frame.
     """
     # Remember the previous position so move quality can be scored
     # later (see check_move_effect in the reward branch).
     self.plane_pre_position = copy.deepcopy(self.plane_body.position)
     if action == 0:
         self.plane_body.angle -= .2
     elif action == 1:
         self.plane_body.angle += .2
     else:
     #    self.plane_body.angle = Vec2d()
     #flying_direction = Vec2d(1,0).rotated(self.plane_body.angle)
         # Aim directly at the target: angle of the normalized
         # plane-to-target vector. NOTE(review): "pisition" is a typo
         # for "position"; it is a local name, kept as-is here.
         plane_position_vector = Vec2d(self.plane_body.position)
         target_pisition_vector = Vec2d(self.target_body.position)
         #angle = planet_position_vector.get_angle_between(target_pisition_vector)
         angle = (target_pisition_vector - plane_position_vector).normalized().get_angle()
         
         self.plane_body.angle =  angle
         
     #flying_direction = Vec2d(1,0).rotated(self.target_body.angle)
     #flying_direction = target_pisition_vector.rotated(angle)
     # Fly forward at constant speed along the (possibly new) heading.
     flying_direction = Vec2d(1,0).rotated(self.plane_body.angle)
     #self.plane_body.angle = flying_direction.get_angle()
     
     self.plane_body.velocity = 100 * flying_direction
     
     # Render and advance the physics.
     screen.fill(THECOLORS["black"])
     draw(screen,self.space)
     self.space.step(1./10)
     if draw_screen:
         pygame.display.flip()
     clock.tick()
     x,y = self.plane_body.position
     readings,readings_position,reading_target = self.get_sonar_readings(x,y,self.plane_body.angle)
     
     state = np.array([readings])
     # Reward: +500 for reaching the target, -500 for crashing
     # (followed by recovery), otherwise scored by move effectiveness.
     if self.reached_target(reading_target):
         # NOTE(review): "rearched" looks like a typo for "reached";
         # kept because other code may read this attribute name.
         self.rearched = True
         reward = 500
     elif self.plane_is_crashed(readings):
         self.crashed = True
         reward = -500
         self.recover_from_crash(flying_direction)
     else:
         # Higher readings are better, so return the sum.
         #reward = -5 + int(self.sum_readings(readings) / 10)
         reward = -5 + self.check_move_effect()
     self.num_steps += 1
     
     #if 1 in readings:
     #    print("reached the target!")
    
     # Flag whether any sonar arm saw the target this frame.
     if True in reading_target:
         target = True
     else: target= False    
     #    self.plane_pre_position = copy.deepcopy(self.plane_body.position)
     # Obstacles relocate every 100 frames.
     if self.num_steps % 100 == 0:
         self.move_obstacles() 
     
     return reward,state,target
Esempio n. 3
0
 def recover(self):
     """Back the car up while rotating it for a few frames."""
     # Reverse along the stored driving direction.
     self.car_body.velocity = -100 * self.driving_direction
     for _ in range(4):
         # Rotate slightly each frame while the physics advances.
         self.car_body.angle += .2
         screen.fill(THECOLORS["black"])
         draw(screen, self.space)
         self.space.step(1. / 10)
         if draw_screen:
             pygame.display.flip()
         clock.tick()
 def recover_from_crash(self, driving_direction):
     """Recover after a collision by reversing and spinning away.

     The loop effectively runs once: ``self.crashed`` is cleared on
     the first pass before the ten recovery frames play out.
     """
     while self.crashed:
         # Back straight up along the pre-crash heading.
         self.car_body.velocity = -100 * driving_direction
         self.crashed = False
         for _ in range(10):
             # Turn a little each frame while the world steps.
             self.car_body.angle += .2
             screen.fill(THECOLORS["red"])  # Red is scary!
             draw(screen, self.space)
             self.space.step(1. / 10)
             if draw_screen:
                 pygame.display.flip()
             clock.tick()
 def recover_from_crash(self, driving_direction, drone_id):
     """Back the given drone away from whatever it hit."""
     crash_adjust = -100  # reverse speed applied to the drone
     self.drones[drone_id].velocity = crash_adjust * driving_direction
     for _ in range(10):
         # Rotate away from the obstacle a little each frame.
         self.drones[drone_id].angle += .2
         screen.fill(THECOLORS["red"])  # Red is scary!
         draw(screen, self.space)
         self.space.step(1. / 10)
         if draw_screen:
             pygame.display.flip()
         clock.tick()
Esempio n. 6
0
    def frame_step(self, action):
        """Advance one frame: steer, step the world, and score it.

        Returns (reward, state, readings); reward is the dot product of
        the weight vector ``self.W`` with the readings plus a trailing
        crash indicator (1 = crashed, 0 = clear).
        """
        # Steering: 0 turns left, 1 turns right; anything else coasts.
        if action == 0:
            self.car_body.angle -= .3
        elif action == 1:
            self.car_body.angle += .3

        # Drive forward at constant speed along the current heading.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        # Render and advance the physics.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1. / 10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Sample the sonar at the car's new position.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)

        # Append the crash flag and recover if we hit something
        # (any reading == 1 means a crash).
        if self.car_is_crashed(readings):
            self.crashed = True
            readings.append(1)
            self.recover_from_crash(driving_direction)
        else:
            readings.append(0)

        # Linear reward: learned weights dotted with readings + flag.
        reward = np.dot(self.W, readings)
        state = np.array([readings])

        self.num_steps += 1

        return reward, state, readings
    def frame_step(self, action):
        """Advance one frame: steer, move the world, and compute reward.

        Returns (reward, state). A crash yields -500 and triggers
        recovery; otherwise the reward grows with the summed sonar
        readings (clearer surroundings score higher).
        """
        # Steering: 0 turns left, 1 turns right.
        if action == 0:
            self.car_body.angle -= .2
        elif action == 1:
            self.car_body.angle += .2

        # Keep the car from disappearing off the playfield.
        self.solve_vanishing(self.car_body.position)

        # Obstacles relocate every 100 frames, the cat every 5.
        if self.num_steps % 100 == 0:
            self.move_obstacles()
        if self.num_steps % 5 == 0:
            self.move_cat()

        # Drive forward at constant speed along the current heading.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        # Render and advance the physics.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1. / 10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Build the state from the sonar readings at the new position.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        state = np.array([readings])

        # A reading of 1 on any arm means we crashed.
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = -500
            self.recover_from_crash(driving_direction)
        else:
            # Higher readings are better, so score their sum.
            reward = -5 + int(self.sum_readings(readings) / 10)
        self.num_steps += 1

        return reward, state
Esempio n. 8
0
	def update(self, surface):
		"""Step the physics space and draw both spaces onto *surface*."""
		self.space.step(self.STEP_TIME)

		# Screen-edge wrapping was removed; bodies may leave the surface.
		pygame_util.draw(surface, self.space)
		pygame_util.draw(surface, self.force_space)
    def frame_step(self, action):
        """Advance one frame and return (reward, state).

        Reward is -500 when ``self.crashed`` is set, otherwise 30 minus
        the summed sensor readings (closer obstacles raise the sum, so
        open space scores best).
        """
        # Steering: 0 turns left, 1 turns right.
        if action == 0:
            self.car_body.angle -= .2
        elif action == 1:
            self.car_body.angle += .2

        # Constant forward speed along the current heading.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        # Sample the sensors at the current position.
        x, y = self.car_body.position
        readings = self.get_sensor_readings(x, y, self.car_body.angle)
        state = np.array([readings])

        # Render and advance the physics.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1. / 10)
        pygame.display.flip()
        clock.tick()

        # Reward: heavy penalty on crash, otherwise clearer is better.
        if self.crashed:
            reward = -500
        else:
            reward = 30 - self.sum_readings(readings)

        self.num_steps += 1

        return reward, state
Esempio n. 10
0
    def frame_step(self, cur_mode, turn_action, speed_action, cur_speed, car_distance):
        """Advance one frame across the layered training modes.

        Depending on ``cur_mode`` (TURN, AVOID, ACQUIRE, HUNT) this
        applies the turn/speed actions, moves obstacles/cats, renders,
        reads the sonar sensors, and computes per-mode state arrays and
        rewards.

        Returns (turn_state, avoid_state, acquire_state, hunt_state,
        reward, cur_speed, reward_turn, reward_acquire, reward_avoid).

        NOTE(review): ``car_distance`` is accepted but never read in
        this body. Also, the HUNT-only blocks below use ``target_dist``
        / ``heading_to_target`` / efficiency values computed in the
        ACQUIRE/HUNT section above, so HUNT must imply that section ran.
        """
        # plot move based on current (active) model prediction
        if cur_mode in [TURN, AVOID, ACQUIRE, HUNT]:
            # action == 0 is continue current trajectory
            if turn_action == 1:  # slight right adjust to current trajectory
                self.car_body.angle -= .2
            elif turn_action == 2:  # hard right
                self.car_body.angle -= .4
            elif turn_action == 3:  # slight left
                self.car_body.angle += .2
            elif turn_action == 4:  # hard left
                self.car_body.angle += .4
        
        if cur_mode in [AVOID, HUNT]: # setting speed value directly see SPEEDS
            if speed_action == 0: # 0 or 30
                cur_speed = SPEEDS[0]
            elif speed_action == 1: # 30 or 50
                cur_speed = SPEEDS[1]
            elif speed_action == 2: # 50 or 70
                cur_speed = SPEEDS[2]
            #elif speed_action == 3: # 70
            #    cur_speed = SPEEDS[3]
        
        # effect move by applying speed and direction as vector on self
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = cur_speed * driving_direction
        
        if cur_mode in [TURN, AVOID, HUNT]:
            # move slow obstacles
            if self.num_steps % 20 == 0: # 20x slower than self
                self.move_obstacles()

            # move fast obstacles
            if self.num_steps % 40 == 0: # 40 x more stable than self
                self.move_cats()

        # Update the screen and surfaces
        screen.fill(pygame.color.THECOLORS[BACK_COLOR])
        
        if cur_mode in [ACQUIRE, HUNT]:
            
            # draw the path self has taken on the acquire grid
            pygame.draw.lines(path_grid, pygame.color.THECOLORS[PATH_COLOR], True,
                              ((self.last_x, height-self.last_y),
                               (self.cur_x, height-self.cur_y)), 1)
            
            # overlay the path, target surfaces on the screen
            screen.blit(path_grid, (0,0))
            screen.blit(target_grid, (0,0))
        
        draw(screen, self.space)
        self.space.step(1./10) # one pixel for every 10 SPEED
        if draw_screen:
            pygame.display.flip()
        if record_video:
            take_screen_shot(screen, "video")

        self.last_x = self.cur_x; self.last_y = self.cur_y
        self.cur_x, self.cur_y = self.car_body.position

        # get readings from the various sensors
        sonar_dist_readings, sonar_color_readings = \
            self.get_sonar_dist_color_readings(self.cur_x, self.cur_y, self.car_body.angle)
        turn_readings = sonar_dist_readings[:TURN_NUM_SENSOR]
        turn_readings = turn_readings + sonar_color_readings[:TURN_NUM_SENSOR]
        
        avoid_readings = sonar_dist_readings[:AVOID_NUM_SENSOR]
        avoid_readings = avoid_readings + sonar_color_readings[:AVOID_NUM_SENSOR]
        avoid_readings.append(turn_action)
        avoid_readings.append(cur_speed)

        if cur_mode in [ACQUIRE, HUNT]:
            
            # 1. calculate distance and angle to active target(s)
            # a. euclidean distance traveled
            dx = self.current_target[0] - self.cur_x
            dy = self.current_target[1] - self.cur_y
            
            target_dist = ((dx**2 + dy**2)**0.5)
            
            # b. calculate target angle
                # i. relative to car
            rads = atan2(dy,dx)
            rads %= 2*pi
            target_angle_degs = degrees(rads)
            
            if target_angle_degs > 180:
                target_angle_degs = target_angle_degs - 360
            
                # ii. relative to car's current direction
            rads = self.car_body.angle
            rads %= 2*pi
            car_angle_degs = degrees(rads)
            
            if car_angle_degs > 360:
                car_angle_degs = car_angle_degs - 360
            
            # "heading" accounts for angle from car and of car netting degrees car must turn
            heading_to_target = target_angle_degs - car_angle_degs
            if heading_to_target < -180:
                heading_to_target = heading_to_target + 360
            
            # 3. calculate normalized efficiency of last move
            # vs. target acquisition
            dt = self.last_target_dist - target_dist
            
            # Outlier guard: large jumps (e.g. after a new target is
            # assigned) are replaced with the running mean delta.
            if abs(dt) >= 12:
                dt = np.mean(self.target_deltas)
            
            # postive distance delta indicates "closing" on the target
            ndt = (dt- np.mean(self.target_deltas)) / np.std(self.target_deltas)
            #ndt = (dt) / np.std(self.target_deltas)
            
            # vs. obstacle avoidance
            do = min(sonar_dist_readings[:HUNT_NUM_SENSOR])
            
            # positive distance delta indicates "avoiding" an obstacle
            ndo = (do - np.mean(self.obstacle_dists)) / np.std(self.obstacle_dists)
            #ndo = (do) / np.std(self.obstacle_dists)
            
            if cur_mode == ACQUIRE:
                acquire_move_efficiency = ndt / target_dist**0.333
                # cubed root of the target distance... lessens effect of distance
            else:
                avoid_move_efficiency = ndo / target_dist**0.333
                acquire_move_efficiency = ndt / target_dist**0.333
                # balancing avoidance with acquisition
                
            # Maintain bounded rolling buffers for the normalizations.
            self.last_target_dist = target_dist
            self.target_deltas.append(dt)
            if len(self.target_deltas) > STATS_BUFFER:
                self.target_deltas.pop(0)
                                    
            self.last_obstacle_dist = min(sonar_dist_readings[:HUNT_NUM_SENSOR])
            self.obstacle_dists.append(do)
            if len(self.obstacle_dists) > STATS_BUFFER:
                self.obstacle_dists.pop(0)
            
            # 4. if w/in reasonable distance, declare victory
            if target_dist <= self.target_radius:
                print("************** target acquired ************")
                self.target_acquired = True
                #target_dist = 1
        
        if cur_mode == HUNT:
            hunt_readings = sonar_dist_readings[:HUNT_NUM_SENSOR]
            hunt_readings = hunt_readings + sonar_color_readings[:HUNT_NUM_SENSOR]
            hunt_readings.append(target_dist)
            hunt_readings.append(heading_to_target)

        # build states
        turn_state = avoid_state = acquire_state = hunt_state = 0
        turn_state = np.array([turn_readings])
        avoid_state = np.array([avoid_readings])

        if cur_mode in [ACQUIRE, HUNT]:
            acquire_state = np.array([[target_dist, heading_to_target]])
            if cur_mode == HUNT:
                hunt_state = np.array([hunt_readings])
    
        # calculate rewards based on training mode(s) in effect
        reward = reward_turn = reward_avoid = reward_acquire = 0
        
        if cur_mode == AVOID:
            read = sonar_dist_readings[:AVOID_NUM_SENSOR]
        elif cur_mode == HUNT:
            read = sonar_dist_readings[:HUNT_NUM_SENSOR]
        else:
            read = sonar_dist_readings[:TURN_NUM_SENSOR]
        
        if self.car_is_crashed(read):
            # car crashed when any reading == 1. note: change (sensor) readings as needed
            self.crashed = True
            reward = reward_turn = reward_avoid = reward_acquire = -500
            if self.cur_x < 0 or self.cur_x > width or self.cur_y < 0 or self.cur_y > height:
                # Off-screen: teleport back to center with a bigger penalty.
                self.car_body.position = int(width/2), int(height/2)
                self.cur_x, self.cur_y = self.car_body.position
                self.num_off_scrn += 1
                print("off screen. total off screens", self.num_off_scrn)
                reward = reward_turn = reward_avoid = reward_acquire = -1000
            self.recover_from_crash(driving_direction)
            
        else:
            if cur_mode == TURN: # Rewards better spacing from objects
                reward = reward_turn = min(sonar_dist_readings[:TURN_NUM_SENSOR])

            elif cur_mode == AVOID: # rewards distance from objects and speed
                #reward = reward_avoid = min(sonar_dist_readings[:AVOID_NUM_SENSOR])
                sd_speeds = np.std(SPEEDS)
                sd_dist = np.std(range(20))
            
                std_speed = cur_speed / sd_speeds
                std_dist = min(sonar_dist_readings[:TURN_NUM_SENSOR]) / sd_dist
            
                std_max_speed = max(SPEEDS) / sd_speeds
                std_max_dist = SONAR_ARM_LEN / sd_dist
            
                reward = reward_avoid = ((std_speed * std_dist) +
                                         ((std_max_speed - std_speed) * (std_max_dist - std_dist)))

            elif cur_mode in [ACQUIRE, HUNT]: # rewards moving in the right direction and acquiring pixels
                if self.target_acquired == True:
                
                    reward_avoid = reward_acquire = 1000
                
                    # remove acquired pixel
                    self.acquired_pixels.append(self.current_target)
                    self.target_pixels.remove(self.current_target)
                    print("pct complete:", (len(self.acquired_pixels) /
                                        (len(self.acquired_pixels) + len(self.target_pixels))))
                                        
                    if len(self.acquired_pixels) % 50 == 1:
                        take_screen_shot(screen, "snap")
                
                    self.assign_next_target((self.cur_x, self.cur_y), False)
                    self.target_acquired = False
            
                else:
                    if cur_mode == ACQUIRE:
                        reward_acquire = 100 * acquire_move_efficiency
                    elif cur_mode == HUNT:
                        reward_acquire = 50 * acquire_move_efficiency
                        reward_avoid = 50 * avoid_move_efficiency

                if cur_mode == HUNT:
                    # Periodic diagnostics dump of the reward inputs.
                    if self.num_steps % 10000 == 0 or self.num_steps % 10000 == 1:
                        print("***** reward calcs *****")
                        print("step counter:",self.num_steps)
                        print("target dist:", target_dist)
                        print("dt:", dt)
                        print("mean dist deltas:", np.mean(self.target_deltas))
                        print("std dist deltas:", np.std(self.target_deltas))
                        print("ndt:", ndt)
                        print("min obs dist:", min(sonar_dist_readings[:HUNT_NUM_SENSOR]))
                        print("do:", do)
                        print("mean obs dists:", np.mean(self.obstacle_dists))
                        print("std obs dists:", np.std(self.obstacle_dists))
                        print("ndo:", ndo)
                        print("target dist ** 0.33:", target_dist**0.333)
                        print("acq move eff:", acquire_move_efficiency)
                        print("acq reward:", reward_acquire)
                        print("avd move eff:", avoid_move_efficiency)
                        print("avd reward:", reward_avoid)

                    # Curriculum: add obstacles/cats and shrink the
                    # target radius at fixed step milestones.
                    if self.num_steps == 2000:
                        self.obstacles.append(self.create_obstacle(random.randint(100, width-100), random.randint(70, height-70),30)) # was 35
                        self.target_radius -= 1

                    if self.num_steps == 4000:
                        self.obstacles.append(self.create_obstacle(random.randint(100, width-100),  random.randint(70, height-70),30)) # was 35
                        self.target_radius -= 1

                    if self.num_steps == 6000:
                        self.obstacles.append(self.create_obstacle(random.randint(100, width-100),random.randint(70, height-70),50)) # was 100
                        self.target_radius -= 1

                    if self.num_steps == 8000:
                        self.obstacles.append(self.create_obstacle(random.randint(100, width-100),random.randint(70, height-100),50)) # was 100
                        self.target_radius -= 1

                    if self.num_steps == 10000:
                        self.obstacles.append(self.create_obstacle(random.randint(100, width-100),random.   randint(70, height-70),63)) # was 125
                        self.target_radius -= 1

                    if self.num_steps == 12000:
                        self.obstacles.append(self.create_obstacle(random.randint(100, width-100), random.randint(70, height-70),63)) # was 125
                        self.target_radius -= 1
                            
                    if self.num_steps == 14000:
                        self.cats.append(self.create_cat(width-950,height-100))
                        self.target_radius -= 1
                        
                    if self.num_steps == 16000:
                        self.cats.append(self.create_cat(width-50,height-600))

                    if self.num_steps == 18000:
                        self.cats.append(self.create_cat(width-50,height-100))
                        
                    if self.num_steps == 20000:
                        self.cats.append(self.create_cat(width-50,height-600))
        
        self.num_steps += 1
        clock.tick()
        
        #if cur_speed != 70:
            #take_screen_shot(screen, "snap")
            #print(cur_speed)

        return turn_state, avoid_state, acquire_state, hunt_state, reward, cur_speed, reward_turn, reward_acquire, reward_avoid
    def frame_step(self, action):
        """Advance one frame: decode the combined action, drive, score.

        ``action`` indexes ``self.action_memory``; each entry is an
        (angle, speed) pair. The speed component adjusts a clamped
        ``velocity_changer`` accumulator. Returns (reward, state).

        NOTE(review): if neither ``speed_is_violated`` nor
        ``speed_within_limits`` is true on a non-crash frame, ``reward``
        is never assigned and the return raises UnboundLocalError —
        presumably the two checks are exhaustive; confirm against their
        definitions. Also ``num_steps`` is not incremented on the crash
        branch — verify that is intentional.
        """
        current_action = self.action_memory[action]
        angle = current_action[0]
        speed = current_action[1]

        minN = 3  # Let speed never get over or under a specific value.
        maxN = 50

        # Clamp the accumulator into [minN, maxN] before applying.
        self.velocity_changer = max(minN, self.velocity_changer)
        self.velocity_changer = min(maxN, self.velocity_changer)

        if angle == 0:  # Turn left.
            self.car_body.angle -= .2
        elif angle == 1:  # Turn right.
            self.car_body.angle += .2

        if speed == 0:  # Slow down.
            self.velocity_changer -= 1
        elif speed == 1:  # Speed up.
            self.velocity_changer += 1

        # Move obstacles.
        if self.num_steps % 100 == 0:
            self.move_obstacles()

        # Move cat.
        if self.num_steps % 5 == 0:
            self.move_cat()

        # Speed agent.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = (50 + (
            (self.velocity_changer) * 0.005)) * driving_direction

        # Draw screen slows down the training, so only draw screen in final frames training and playing.
        # NOTE(review): draw_screen here is a local that shadows the
        # module-level flag used by other methods in this file.
        if self.num_steps < 1490000:
            draw_screen = False
            # Update the screen and stuff.
            screen.fill(THECOLORS["black"])
            draw(screen, self.space)
            self.space.step(1. / 10)
            if draw_screen:
                pygame.display.flip()
                clock.tick()

        else:
            draw_screen = True
            # Update the screen and stuff.
            screen.fill(THECOLORS["black"])
            draw(screen, self.space)
            self.space.step(1. / 10)
            if draw_screen:
                pygame.display.flip()
                clock.tick()
            # Allow the window to be closed once rendering is on.
            for evt in pygame.event.get():
                if evt.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()

        # Get the current location and the readings of sonar arms and velocity as state.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        normalized_readings = [(x - 20.0) / 20.0 for x in readings]
        state = np.array([normalized_readings])

        # Set the reward
        if self.car_is_crashed(readings):
            # Car crashed when any reading of sonar arms == 1.
            self.crashed = True
            reward = -1000
            self.recover_from_crash(driving_direction)
        elif self.speed_is_violated():
            # Set low reward if the speedlimit is violated.
            coef_velo_change = 1.3
            reward = -50 - int(self.velocity_changer**coef_velo_change)
            self.num_steps += 1
        elif self.speed_within_limits():
            # Reward is based on the readings (lower reading is better) and the velocity  coefficient (higher velocity is better).
            intercept_reward = -5
            coef_velo_change = 1.738495
            coef_sum_readings = 1.393518

            reward = intercept_reward + int(
                self.velocity_changer**coef_velo_change) + int(
                    self.sum_readings(readings[0:3])**coef_sum_readings)
            self.num_steps += 1

        return reward, state
Esempio n. 12
0
    def frame_step(self, action, action2):
        """Advance one frame controlling both the car and the cat.

        ``action`` steers the car (0 left, 1 right); when the car does
        not turn, ``action2`` steers the cat. Returns (reward, state).

        Fix: ``print readings`` was a Python 2 print statement — a
        syntax error under Python 3 and inconsistent with the print()
        calls used elsewhere in this block — now a function call, which
        is valid on both Python 2 and 3 for a single argument.
        """
        # NOTE: action/action2 form a single elif chain, so the cat
        # only turns on frames where the car does not.
        if action == 0:  # Turn left.
            self.car_body.angle -= .2
        elif action == 1:  # Turn right.
            self.car_body.angle += .2
        elif action2 == 0:  # Turn right.
            # NOTE(review): both action2 branches apply +.2 with the
            # same "Turn right" comment; one was probably meant to be
            # -= (turn left). Behavior kept as-is pending confirmation.
            self.cat_body.angle += .2
        elif action2 == 1:  # Turn right.
            self.cat_body.angle += .2

        # Car drives at 100, cat at 80, each along its own heading.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        moving_direction = Vec2d(1, 0).rotated(self.cat_body.angle)
        self.cat_body.velocity = 80 * moving_direction

        # Render and advance the physics (finer step than the usual 1/10).
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1./30)  #original 1./10
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Sonar readings at the car's new position.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)

        state = np.array([readings])

        print(readings)

        # Reward: crash is heavily penalised, catching the cat is
        # rewarded, spotting the cat (-5 colour code on any of the
        # first three arms) gives a bonus, otherwise the reward scales
        # with how clear the sonar arms are.
        if self.car_is_crashed(readings):
            self.crashed = 1
            reward = -500
            self.recover_from_crash(driving_direction)

        elif self.cat_is_caught(readings):
            self.caught = 1
            reward = 500
            self.recover_from_caught(moving_direction)

        elif readings[0][1] == -5 or readings[1][1] == -5 or readings[2][1] == -5:
            reward = 50 - int(self.sum_readings(readings) / 10)

        else:
            # Higher readings are better, so return the sum.
            reward = -12 + int(self.sum_readings(readings) / 10)

        print("current reward: %s" % reward)
        self.num_steps += 1

        return reward, state
Esempio n. 13
0
File: acar.py Progetto: Silvicek/gym
    def _step(self, action):
        """Gym-style step: apply ``action``, advance the physics, and
        return (state, reward, done, info).

        Actions: 0 drive forward, 1 turn left, 2 turn right.
        When ``memory_steps`` > 0 the observation is a rolling window
        of past observations and one-hot-encoded past actions.
        """
        self.last_position = copy.copy(self.car.body.position)
        # Decode the action into drive/turn components.
        go = 0
        left = 0
        right = 0
        if action == 0:
            go = 1
        elif action == 1:
            left = 1
        elif action == 2:
            right = -1
        # elif action == 3:
        #     go = -1

        # left is +1 and right is -1, so (left+right) picks the turn
        # direction; at most one of them is nonzero per step.
        self.car.body.angle += .2 * (left+right)

        # Move dynamic objects
        if self.num_steps % 5 == 0:
            self._move_dynamic()

        # Velocity is zero unless the "go" action was chosen.
        driving_direction = Vec2d(1, 0).rotated(self.car.body.angle)
        self.car.body.velocity = int(100 * go) * driving_direction

        # Update the screen and stuff.
        self.space.step(1. / 10)

        self.screen.fill(THECOLORS["black"])
        draw(self.screen, self.space)
        if self.draw_screen:
            pygame.display.flip()
        self.clock.tick()

        # Get the current location and the readings there.
        x, y = self.car.body.position
        xt, yt = self.target.body.position

        # Observation: sonar readings + angle to target + scaled distance.
        readings = self._get_sonar_readings(x, y, self.car.body.angle)
        distance = np.sqrt((x-xt)**2+(y-yt)**2)/100.
        readings += [self._get_angle(), distance]
        state = np.array(readings)

        # A crash on the very first step retries the episode from a
        # fresh reset instead of reporting it.
        # NOTE(review): calls self.step (the public gym wrapper around
        # this _step) — confirm the wrapper exists in this gym version.
        if self.crashed or self._out_of_bounds():
            if self.num_steps == 0:
                self.reset()
                return self.step(action)
            elif self._out_of_bounds():
                self.crashed = True

        self.num_steps += 1

        r = self.get_reward(action)

        # Shift the rolling (observation, action) history window and
        # prepend the newest observation and one-hot previous action.
        if self.memory_steps > 0:
            self.full_state = shift(self.full_state, self.action_dim+self.observation_dim)
            self.full_state[self.observation_dim:self.observation_dim+self.action_dim] = \
                bin_from_int(self.old_action, self.action_dim)
            self.full_state[:self.observation_dim] = state
            state = self.full_state
        else:
            self.full_state = state

        self.old_action = action

        return state, r, self.crashed, {}
Esempio n. 14
0
    def frame_step(self, input_actions):
        """Advance one frame from a one-hot action vector.

        ``input_actions`` is a one-hot of [do nothing, turn left,
        turn right]. Returns (image_data, reward, terminal) where
        image_data is the rendered frame's pixel array.
        """
        terminal = False
        if sum(input_actions) != 1:
            raise ValueError('Multiple input actions!')

        # One-hot decode: index 1 turns left, index 2 turns right.
        if input_actions[1] == 1:
            self.car_body.angle -= .2
        elif input_actions[2] == 1:
            self.car_body.angle += .2

        # Constant forward speed along the current heading.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        # Render and advance the physics.
        screen.fill(THECOLORS["white"])
        draw(screen, self.space)
        self.space.step(1. / 10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Sonar is only used for crash detection; the state is the
        # car's normalised screen position.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        state = np.array([[self.car_body.position[0] / width,
                           self.car_body.position[1] / height]])

        # Crash, goal hit, or a plain per-step reward.
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = _crash_reward
            self.recover_from_crash(driving_direction)
        elif ((_goal[0] - 10) <= self.car_body.position[0] <= (_goal[0] + 10)
                and (_goal[1] - 10) <= self.car_body.position[1] <= (_goal[1] + 10)):
            # Within a 10-pixel box around the goal: episode ends.
            self.goal_hit = True
            reward = _goal_reward
            self.recover_from_goal(driving_direction)
            terminal = True
        else:
            reward = _normal_reward

        # Remember where we were for the next frame.
        _old_position[0] = self.car_body.position.x
        _old_position[1] = self.car_body.position.y

        # The rendered frame is returned as the observation.
        image_data = pygame.surfarray.array3d(pygame.display.get_surface())

        self.num_steps += 1

        return image_data, reward, terminal
Esempio n. 15
0
def evaluate(worldRef, genome, substrate, i, display = False, NNDisplay = False):
	"""Evaluate one HyperNEAT genome on the keepaway world.

	Builds the phenotype network from *genome* and *substrate*, hands it
	to every keeper, then steps the game until it is over or the keeper
	score exceeds 10000.  The keeper score is returned as the fitness.

	:param worldRef: game-world object; reset and stepped in place
	:param genome: MultiNEAT genome the phenotype is built from
	:param substrate: HyperNEAT substrate describing the network layout
	:param i: evaluation index (only referenced by disabled debug prints)
	:param display: forwarded to worldRef.commonFunctionality as the
	    graphics flag
	:param NNDisplay: when True, draw the phenotype via OpenCV and the
	    pymunk space via pygame every frame
	:return: worldRef.keeperScore at the end of the episode
	"""
	# NOTE(review): a fresh window/space/clock is created per evaluation;
	# they are only actually used when NNDisplay is True.
	screen = pygame.display.set_mode((600, 600))
	space = pm.Space()
	clock = pygame.time.Clock()
	#print("Starting evaluation ",i)
	net = NEAT.NeuralNetwork()
	#try:
	genome.BuildHyperNEATPhenotype(net, substrate)
	worldRef.displayGraphics = True
	worldRef.resetGameForTraining()
	#print("Game reset for training")
	#counter = 0
	showDisplay = display
	# Give every keeper the freshly built network before the game starts.
	for keeper in worldRef.keeperArray:
		keeper.receiveNN(net)
		
	while worldRef.isGameOver() == False and worldRef.keeperScore <= 10000:
		#print("Entering while game is not over for ",counter,"  time")
		#counter += 1
		for event in pygame.event.get():
			if event.type == pygame.QUIT:
				# NOTE(review): gameExit is never read anywhere -- closing
				# the window does NOT actually stop this loop.
				gameExit = True
		worldRef._sendCalcReceiveDecision()
		worldRef._sendBirdsEyeView()
		#reward = 100000

		# Let each agent act; the keepers are (redundantly) handed the
		# same network again on every frame before deciding.
		for keeper in worldRef.keeperArray:
			keeper.receiveNN(net)
			#print(super(keeper))
			keeper.decisionFlowChart("NEAT trying to move")
		for taker in worldRef.takerArray:
			taker.decisionFlowChart("NEAT trying to move")
	
		'''    
		newBallPoint = kUtil.addVectorToPoint(worldRef.fieldBall.trueBallPos, kUtil.scalarMultiply(worldRef.maxBallSpeed, kUtil.unitVector(worldRef.fieldBall.trueBallDirection)))
		worldRef.fieldBall.updateCoordinate(newBallPoint)
		for i in range(len(worldRef.takerArray)):
			worldRef.takerArray[i].noisyBallPos = kUtil.getNoisyVals(worldRef.fieldBall.trueBallPos, worldRef.takerArray[i].sigma)
		for i in range(len(worldRef.keeperArray)):
			worldRef.keeperArray[i].noisyBallPos = kUtil.getNoisyVals(worldRef.fieldBall.trueBallPos, worldRef.keeperArray[i].sigma)                
		worldRef.updateBallPosession()
		worldRef.updateScore()
		if(worldRef.displayGraphics == True):
			worldRef.drawWorld ()
			worldRef.displayScore()
			pygame.display.update()
		'''
		if(NNDisplay):
			# draw the phenotype
			img = np.zeros((450, 450, 3), dtype=np.uint8)
			img += 10
			NEAT.DrawPhenotype(img, (0, 0, 450, 450), net ,15, 3, substrate)
			cv2.imshow("current best", img)
			cv2.waitKey(1)

			## Draw stuff
			screen.fill(THECOLORS["black"])

			### Draw stuff
			draw(screen, space)

			### Flip screen
			pygame.display.flip()
			clock.tick(10000)

		
		worldRef.commonFunctionality("hyperNEAT",showDisplay,True)
		worldRef.clock.tick(10000)

	#print("Ending Evaluation ",i)

	return worldRef.keeperScore
	'''
Esempio n. 16
0
def evaluate(x):
    """Run one physics episode for a NEAT genome and score it.

    *x* is a tuple ``(gid, genome, space, screen, fast_mode)``.  The
    genome's phenotype drives an agent that tries to get close to a
    ball; fitness is 10000 minus the closest distance ever reached.

    :return: ``(fast_mode, gid, fitness, behavior)`` where *behavior*
        wraps the agent's final position.
    """
    gid, genome, space, screen, fast_mode = x
    clock = pygame.time.Clock()

    # Build the network phenotype and drop both bodies into the space.
    net = NEAT.NeuralNetwork()
    genome.BuildPhenotype(net)
    agent = NN_agent(space, net)
    ball = Ball(space)

    closest = 1000000
    for step in range(1, max_timesteps + 1):
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                exit()
            elif event.type == KEYDOWN and event.key == K_f:
                # F toggles between rendered and headless (fast) mode.
                fast_mode = not fast_mode

        # Advance the physics by a fixed 1/50 s tick.
        space.step(1.0 / 50.0)

        # The network interacts with the world once every 5 timesteps.
        if step % 5 == 0:
            agent.interact(ball)

        if not fast_mode:
            # Show the current phenotype in an OpenCV window.
            cv2.imshow("current best", Draw(net))
            cv2.waitKey(1)

            # Clear, draw and flip the simulation view.
            screen.fill(THECOLORS["black"])
            draw(screen, space)
            pygame.display.flip()
            clock.tick(50)

        # Track the closest approach between agent and ball.
        dx = ball.body.position[0] - agent.body.position[0]
        dy = ball.body.position[1] - agent.body.position[1]
        closest = min(closest, np.sqrt(dx ** 2 + dy ** 2))

    fitness = 10000 - closest

    # Leave a trace of this genome's final state on the shared screen.
    draw(screen, space)
    pygame.display.flip()

    # Remove this episode's objects so the space can be reused.
    space.remove(agent.shape, agent.body)
    space.remove(ball.shape, ball.body)

    return fast_mode, gid, fitness, Behavior(agent.body.position[0], agent.body.position[1])
Esempio n. 17
0
def main():
    """Interactive pymunk demo: aim a cannon with the mouse, fire arrows.

    Hold the left mouse button to charge power, release to fire.  Flying
    arrows get a simple drag impulse applied at their tail so they rotate
    to point along the flight path.  Runs until the window is closed or
    ESC/Q is pressed.
    """
    ### PyGame init
    pygame.init()
    screen = pygame.display.set_mode((width,height)) 
    clock = pygame.time.Clock()
    running = True
    font = pygame.font.SysFont("Arial", 16)
    
    ### Physics stuff
    space = pymunk.Space()   
    space.gravity = 0,-1000
    # walls - the left-top-right walls
    static= [pymunk.Segment(space.static_body, (50, 50), (50, 550), 5)
                ,pymunk.Segment(space.static_body, (50, 550), (650, 550), 5)
                ,pymunk.Segment(space.static_body, (650, 550), (650, 50), 5)
                ,pymunk.Segment(space.static_body, (50, 50), (650, 50), 5)
                ]  
    
    b2 = pymunk.Body()
    static.append(pymunk.Circle(b2, 30))
    b2.position = 300,400
    
    for s in static:
        s.friction = 1.
        s.group = 1
    space.add(static)
    
    # "Cannon" that can fire arrows: infinite mass/moment so physics never
    # moves it, and a sensor shape so it never collides with anything.
    cannon_body = pymunk.Body(pymunk.inf, pymunk.inf)
    cannon_shape = pymunk.Circle(cannon_body, 25)
    cannon_shape.sensor = True
    cannon_body.position = 100,100
    space.add(cannon_shape)
    
    arrow_body,arrow_shape = create_arrow()
    space.add(arrow_shape)
        
    space.add_collision_handler(0, 1, post_solve=post_solve_arrow_hit)

    flying_arrows = []

    # BUGFIX: start_time used to be unbound until the first
    # MOUSEBUTTONDOWN, so a stray MOUSEBUTTONUP (e.g. a click started
    # before the window had focus) raised NameError.  Initialise it up
    # front; the power clamp below bounds any resulting large diff.
    start_time = 0
    
    while running:
        for event in pygame.event.get():
            if event.type == QUIT or \
                event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):  
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                start_time = pygame.time.get_ticks()
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "arrows.png")
            elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                end_time = pygame.time.get_ticks()
                
                # Power scales with how long the button was held, clamped
                # to [10, 1000] ms.
                diff = end_time - start_time
                power = max(min(diff, 1000), 10) * 1.5
                impulse = power * Vec2d(1,0)
                arrow_body.apply_impulse(impulse.rotated(arrow_body.angle))
                
                # The arrow's body only joins the space once it is fired.
                space.add(arrow_body)
                flying_arrows.append(arrow_body)
                
                arrow_body, arrow_shape = create_arrow()
                space.add(arrow_shape)
            
        keys = pygame.key.get_pressed()
        
        speed = 2.5
        if (keys[K_UP]):
            cannon_body.position += Vec2d(0,1) * speed
        if (keys[K_DOWN]):
            cannon_body.position += Vec2d(0,-1) * speed
        if (keys[K_LEFT]):
            cannon_body.position += Vec2d(-1,0) * speed
        if (keys[K_RIGHT]):
            cannon_body.position += Vec2d(1,0) * speed
            
        mouse_position = from_pygame( Vec2d(pygame.mouse.get_pos()), screen )
        cannon_body.angle = (mouse_position - cannon_body.position).angle
        # move the unfired arrow together with the cannon
        arrow_body.position = cannon_body.position + Vec2d(cannon_shape.radius + 40, 0).rotated(cannon_body.angle)
        arrow_body.angle = cannon_body.angle
        
        for flying_arrow in flying_arrows:
            drag_constant = 0.0002
            pointing_direction = Vec2d(1,0).rotated(flying_arrow.angle)
            flight_direction = Vec2d(flying_arrow.velocity)
            flight_speed = flight_direction.normalize_return_length()
            dot = flight_direction.dot(pointing_direction)
            # (1-abs(dot)) can be replaced with (1-dot) to make arrows turn around even when fired straight up. 
            # Might not be as accurate, but maybe look better.
            drag_force_magnitude = (1-abs(dot)) * flight_speed **2 * drag_constant * flying_arrow.mass
            
            # Drag applies at the tail, torquing the arrow into its
            # flight direction.
            arrow_tail_position = Vec2d(-50, 0).rotated(flying_arrow.angle)
            flying_arrow.apply_impulse(drag_force_magnitude * -flight_direction, arrow_tail_position)
            
            flying_arrow.angular_velocity *= 0.9
            
        ### Clear screen
        screen.fill(pygame.color.THECOLORS["black"])
        
        ### Draw stuff
        draw(screen, space)
        
        # Power meter
        if pygame.mouse.get_pressed()[0]:
            current_time = pygame.time.get_ticks()
            diff = current_time - start_time
            power = max(min(diff, 1000), 10)
            h = power / 2
            pygame.draw.line(screen, pygame.color.THECOLORS["red"], (30,550), (30,550-h), 10)
                
        # Info and flip screen
        # NOTE(review): the help text mentions "Press R to reset" but no
        # R handler exists in the event loop above.
        screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
        screen.blit(font.render("Aim with mouse, hold LMB to powerup, release to fire", 1, THECOLORS["darkgrey"]), (5,height - 35))
        screen.blit(font.render("Press R to reset, ESC or Q to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))
        
        pygame.display.flip()
        
        ### Update physics: fixed 60 Hz step
        fps = 60
        dt = 1./fps
        space.step(dt)
        
        clock.tick(fps)
Esempio n. 18
0
def main():
    """Pymunk platformer demo with a sprite-animated player.

    Builds a boxed level with static platforms, a rounded slope, a moving
    platform and a one-way pass-through platform, then runs the game
    loop: Left/Right to run, Up to jump (press again mid-air to double
    jump), D toggles sprite drawing, ESC/Q quits.

    NOTE(review): ``dt`` and ``fps`` are read in the loop but never
    assigned in this function -- presumably module-level constants;
    confirm they exist at file scope.
    """

    ### PyGame init
    pygame.init()
    screen = pygame.display.set_mode((width,height)) 

    clock = pygame.time.Clock()
    running = True
    font = pygame.font.SysFont("Arial", 16)
    sound = pygame.mixer.Sound("sfx.wav")
    img = pygame.image.load("xmasgirl1.png")
    
    ### Physics stuff
    space = pymunk.Space()   
    space.gravity = 0,-1000
    # box walls 
    static = [pymunk.Segment(space.static_body, (10, 50), (300, 50), 5)
                , pymunk.Segment(space.static_body, (300, 50), (325, 50), 5)
                , pymunk.Segment(space.static_body, (325, 50), (350, 50), 5)
                , pymunk.Segment(space.static_body, (350, 50), (375, 50), 5)
                , pymunk.Segment(space.static_body, (375, 50), (680, 50), 5)
                , pymunk.Segment(space.static_body, (680, 50), (680, 370), 5)
                , pymunk.Segment(space.static_body, (680, 370), (10, 370), 5)
                , pymunk.Segment(space.static_body, (10, 370), (10, 50), 5)
                ]  
    static[1].color = pygame.color.THECOLORS['red']
    static[2].color = pygame.color.THECOLORS['green']
    static[3].color = pygame.color.THECOLORS['red']
    
    # rounded shape
    rounded = [pymunk.Segment(space.static_body, (500, 50), (520, 60), 5)
                , pymunk.Segment(space.static_body, (520, 60), (540, 80), 5)
                , pymunk.Segment(space.static_body, (540, 80), (550, 100), 5)
                , pymunk.Segment(space.static_body, (550, 100), (550, 150), 5)
                ]
                
    # static platforms
    platforms = [pymunk.Segment(space.static_body, (170, 50), (270, 150), 5)
                #, pymunk.Segment(space.static_body, (270, 100), (300, 100), 5)
                , pymunk.Segment(space.static_body, (400, 150), (450, 150), 5)
                , pymunk.Segment(space.static_body, (400, 200), (450, 200), 5)
                , pymunk.Segment(space.static_body, (220, 200), (300, 200), 5)
                , pymunk.Segment(space.static_body, (50, 250), (200, 250), 5)
                , pymunk.Segment(space.static_body, (10, 370), (50, 250), 5)
                ]
    
    for s in static + platforms+rounded:
        s.friction = 1.
        s.group = 1
    space.add(static, platforms+rounded)
    
    # moving platform: kinematic body ping-ponging along a fixed path
    platform_path = [(650,100),(600,200),(650,300)]
    platform_path_index = 0
    platform_body = pymunk.Body(pymunk.inf, pymunk.inf)
    platform_body.position = 650,100
    s = pymunk.Segment(platform_body, (-25, 0), (25, 0), 5)
    s.friction = 1.
    s.group = 1
    s.color = pygame.color.THECOLORS["blue"]
    space.add(s)
    
    # pass through platform: only collides with the player's feet while
    # the player is falling (see passthrough_handler below)
    passthrough = pymunk.Segment(space.static_body, (270, 100), (320, 100), 5)
    passthrough.color = pygame.color.THECOLORS["yellow"]
    passthrough.friction = 1.
    passthrough.collision_type = 2
    passthrough.layers = passthrough.layers ^ 0b1000
    space.add(passthrough)
    
    def passthrough_handler(space, arbiter):
        # Collide only when the player is moving downwards, so he can
        # jump up through the platform but land on it.
        if arbiter.shapes[0].body.velocity.y < 0:
            return True
        else:
            return False
            
    space.add_collision_handler(1,2, begin=passthrough_handler)
    
    
    # player: one dynamic body with three stacked circles (feet + two
    # head segments); infinite moment keeps the body upright
    body = pymunk.Body(5, pymunk.inf)
    body.position = 100,100
    
    
    head = pymunk.Circle(body, 10, (0,5))
    head2 = pymunk.Circle(body, 10, (0,13))
    feet = pymunk.Circle(body, 10, (0,-5))

    head.layers = head2.layers = 0b1000
    feet.collision_type = 1
    feet.ignore_draw = head.ignore_draw = head2.ignore_draw = True
    
    space.add(body, head, feet,head2)
    direction = 1
    remaining_jumps = 2
    landing = {'p':Vec2d.zero(), 'n':0}
    frame_number = 0
    
    landed_previous = False
    
    while running:
        
        grounding = {
            'normal' : Vec2d.zero(),
            'penetration' : Vec2d.zero(),
            'impulse' : Vec2d.zero(),
            'position' : Vec2d.zero(),
            'body' : None
        }
        # find out if player is standing on ground: scan the body's
        # contacts for the most upward-facing normal
        
                
        def f(arbiter):
            n = -arbiter.contacts[0].normal
            if n.y > grounding['normal'].y:
                grounding['normal'] = n
                grounding['penetration'] = -arbiter.contacts[0].distance
                grounding['body'] = arbiter.shapes[1].body
                grounding['impulse'] = arbiter.total_impulse
                grounding['position'] = arbiter.contacts[0].position
        body.each_arbiter(f)
            
        # "Well grounded" means standing on a surface shallow enough
        # that friction can hold the player (slope test vs feet.friction).
        well_grounded = False
        if grounding['body'] is not None and abs(grounding['normal'].x/grounding['normal'].y) < feet.friction:
            well_grounded = True
            remaining_jumps = 2
    
        ground_velocity = Vec2d.zero()
        if well_grounded:
            ground_velocity = grounding['body'].velocity
    
        for event in pygame.event.get():
            if event.type == QUIT or \
                event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):  
                running = False
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "platformer.png")

            elif event.type == KEYDOWN and event.key == K_d:
                feet.ignore_draw = not feet.ignore_draw
                head.ignore_draw = not head.ignore_draw
                head2.ignore_draw = not head2.ignore_draw
                
            elif event.type == KEYDOWN and event.key == K_UP:
                if well_grounded or remaining_jumps > 0:
                    # Jump velocity from v = sqrt(2*g*h).
                    jump_v = math.sqrt(2.0 * JUMP_HEIGHT * abs(space.gravity.y))
                    body.velocity.y = ground_velocity.y + jump_v
                    remaining_jumps -=1
            elif event.type == KEYUP and event.key == K_UP:                
                # Releasing Up early cuts the jump short.
                body.velocity.y = min(body.velocity.y, JUMP_CUTOFF_VELOCITY)
                
        # Target horizontal velocity of player
        target_vx = 0
        
        if body.velocity.x > .01:
            direction = 1
        elif body.velocity.x < -.01:
            direction = -1
        
        keys = pygame.key.get_pressed()
        if (keys[K_LEFT]):
            direction = -1
            target_vx -= PLAYER_VELOCITY
        if (keys[K_RIGHT]):
            direction = 1
            target_vx += PLAYER_VELOCITY
        if (keys[K_DOWN]):
            direction = -3
            
        # Move the player by dragging his feet along the ground via
        # surface velocity + friction.
        feet.surface_velocity = target_vx,0

        
        if grounding['body'] is not None:
            feet.friction = -PLAYER_GROUND_ACCEL/space.gravity.y
            # BUGFIX: was "head.friciton" (typo), so head friction was
            # silently never applied.
            head.friction = HEAD_FRICTION
        else:
            feet.friction,head.friction = 0,0
        
        # Air control
        if grounding['body'] is None:
            body.velocity.x = cpflerpconst(body.velocity.x, target_vx + ground_velocity.x, PLAYER_AIR_ACCEL*dt)
        
        body.velocity.y = max(body.velocity.y, -FALL_VELOCITY) # clamp upwards as well?
        
        # Move the moving platform at constant speed along its path,
        # advancing to the next waypoint when close enough.
        destination = platform_path[platform_path_index]
        current = Vec2d(platform_body.position)
        distance = current.get_distance(destination)
        if distance < PLATFORM_SPEED:
            platform_path_index += 1
            platform_path_index = platform_path_index % len(platform_path)
            t = 1
        else:
            t = PLATFORM_SPEED / distance
        new = current.interpolate_to(destination, t)
        platform_body.position = new
        # Give the platform a real velocity so riders move with it.
        platform_body.velocity = (new - current) / dt
        
        ### Clear screen
        screen.fill(pygame.color.THECOLORS["black"])
        
        ### Helper lines
        for y in [50,100,150,200,250,300]:
            color = pygame.color.THECOLORS['darkgrey']
            pygame.draw.line(screen, color, (10,y), (680,y), 1)
        
        ### Draw stuff
        draw(screen, space)
        
        # Sprite drawing: pick the row by facing direction and the
        # column by run-cycle frame / airborne state.
        if feet.ignore_draw:
            direction_offset = 48+(1*direction+1)/2 * 48
            if grounding['body'] is not None and abs(target_vx) > 1:
                animation_offset = 32 * (frame_number / 8 % 4)
            elif grounding['body'] is None:
                animation_offset = 32*1
            else:
                animation_offset = 32*0
            position = body.position +(-16,28)
            screen.blit(img, to_pygame(position, screen), (animation_offset, direction_offset, 32, 48))

        # Did we land?  Play a sound and flash a marker on hard impacts.
        if abs(grounding['impulse'].y) / body.mass > 200 and not landed_previous:
            sound.play()
            landing = {'p':grounding['position'],'n':5}
            landed_previous = True
        else:
            landed_previous = False
        if landing['n'] > 0:
            pygame.draw.circle(screen, pygame.color.THECOLORS['yellow'], to_pygame(landing['p'], screen), 5)
            landing['n'] -= 1
        
        # Info and flip screen
        screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
        screen.blit(font.render("Move with Left/Right, jump with Up, press again to double jump", 1, THECOLORS["darkgrey"]), (5,height - 35))
        screen.blit(font.render("Press D to toggle sprite draw, ESC or Q to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))
        
       
        pygame.display.flip()
        frame_number += 1
        ### Update physics
        
        space.step(dt)
        
        clock.tick(fps)
Esempio n. 19
0
    def frame_step(self, action):
        """Advance the simulation one tick for a steering action.

        Turns the car (0 = turn one way, 1 = the other, anything else
        keeps heading), drives it forward at constant speed, steps the
        physics, reads the sonar sensors and computes a distance-shaped
        reward toward the global ``goal`` x-coordinate.

        :param action: 0 or 1 to steer; other values go straight
        :return: ``(reward, state, distance)`` where *state* is a
            (1, N) numpy array of normalized sonar readings plus
            orientation, -orientation and distance-to-goal
        """
        global last_distance
        global goal
        global width

        # Steering.
        if action == 0:
            self.car_body.angle -= .157
        elif action == 1:
            self.car_body.angle += .157

        # Constant forward speed along the current heading.
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 20*driving_direction

        # Update the screen and stuff.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1.0/10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Get the current location and the readings there.
        x, y = self.car_body.position

        orientation = self.car_body.angle

        # Horizontal distance to the goal line.
        distance = math.fabs(x - goal)

        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        # Normalize sonar readings around their 20-unit midpoint.
        normalized_readings = [(r-20.0)/20.0 for r in readings]
        normalized_readings.append(orientation)
        normalized_readings.append(-orientation)
        normalized_readings.append(distance)
        state = np.array([normalized_readings])

        # Set the reward.
        # Car crashed when any reading == 1
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = -500
            self.recover_from_crash(driving_direction, action)
        else:
            # Shaped reward: positive when the car closed the gap to the
            # goal since last frame, negative when it moved away.
            # BUGFIX: the previous code returned the absolute delta in
            # both branches, so moving AWAY from the goal was rewarded
            # exactly as much as approaching it.
            reward = last_distance - distance

        if distance < 10:
            # Goal reached: flip the goal to the other side of the field
            # and reward finishing faster than the previous round.
            goal = width - goal
            reward = self.last_steps - self.num_steps # reward for reaching the objective faster than last round (may want to scale this)
            self.last_steps = self.num_steps 
            self.num_steps = 0

        self.num_steps += 1
        last_distance = distance

        return reward, state, distance
Esempio n. 20
0
def main():
    """Interactive pymunk demo: aim a cannon with the mouse, fire arrows.

    Hold the left mouse button to charge power, release to fire.  Flying
    arrows get a simple drag impulse applied at their tail so they rotate
    to point along the flight path.  Runs until the window is closed or
    ESC/Q is pressed.
    """
    ### PyGame init
    pygame.init()
    screen = pygame.display.set_mode((width, height))
    clock = pygame.time.Clock()
    running = True
    font = pygame.font.SysFont("Arial", 16)

    ### Physics stuff
    space = pymunk.Space()
    space.gravity = 0, -1000
    # walls - the left-top-right walls
    static = [
        pymunk.Segment(space.static_body, (50, 50), (50, 550), 5),
        pymunk.Segment(space.static_body, (50, 550), (650, 550), 5),
        pymunk.Segment(space.static_body, (650, 550), (650, 50), 5),
        pymunk.Segment(space.static_body, (50, 50), (650, 50), 5)
    ]

    b2 = pymunk.Body()
    static.append(pymunk.Circle(b2, 30))
    b2.position = 300, 400

    for s in static:
        s.friction = 1.
        s.group = 1
    space.add(static)

    # "Cannon" that can fire arrows: infinite mass/moment so physics
    # never moves it, and a sensor shape so it never collides.
    cannon_body = pymunk.Body(pymunk.inf, pymunk.inf)
    cannon_shape = pymunk.Circle(cannon_body, 25)
    cannon_shape.sensor = True
    cannon_body.position = 100, 100
    space.add(cannon_shape)

    arrow_body, arrow_shape = create_arrow()
    space.add(arrow_shape)

    space.add_collision_handler(0, 1, post_solve=post_solve_arrow_hit)

    flying_arrows = []

    # BUGFIX: start_time used to be unbound until the first
    # MOUSEBUTTONDOWN, so a stray MOUSEBUTTONUP (e.g. a click started
    # before the window had focus) raised NameError.  Initialise it up
    # front; the power clamp below bounds any resulting large diff.
    start_time = 0

    while running:
        for event in pygame.event.get():
            if event.type == QUIT or \
                event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                start_time = pygame.time.get_ticks()
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "arrows.png")
            elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                end_time = pygame.time.get_ticks()

                # Power scales with hold duration, clamped to [10, 1000] ms.
                diff = end_time - start_time
                power = max(min(diff, 1000), 10) * 1.5
                impulse = power * Vec2d(1, 0)
                arrow_body.apply_impulse(impulse.rotated(arrow_body.angle))

                # The arrow's body only joins the space once it is fired.
                space.add(arrow_body)
                flying_arrows.append(arrow_body)

                arrow_body, arrow_shape = create_arrow()
                space.add(arrow_shape)

        keys = pygame.key.get_pressed()

        speed = 2.5
        if (keys[K_UP]):
            cannon_body.position += Vec2d(0, 1) * speed
        if (keys[K_DOWN]):
            cannon_body.position += Vec2d(0, -1) * speed
        if (keys[K_LEFT]):
            cannon_body.position += Vec2d(-1, 0) * speed
        if (keys[K_RIGHT]):
            cannon_body.position += Vec2d(1, 0) * speed

        mouse_position = from_pygame(Vec2d(pygame.mouse.get_pos()), screen)
        cannon_body.angle = (mouse_position - cannon_body.position).angle
        # move the unfired arrow together with the cannon
        arrow_body.position = cannon_body.position + Vec2d(
            cannon_shape.radius + 40, 0).rotated(cannon_body.angle)
        arrow_body.angle = cannon_body.angle

        for flying_arrow in flying_arrows:
            drag_constant = 0.0002
            pointing_direction = Vec2d(1, 0).rotated(flying_arrow.angle)
            flight_direction = Vec2d(flying_arrow.velocity)
            flight_speed = flight_direction.normalize_return_length()
            dot = flight_direction.dot(pointing_direction)
            # (1-abs(dot)) can be replaced with (1-dot) to make arrows turn around even when fired straight up.
            # Might not be as accurate, but maybe look better.
            drag_force_magnitude = (
                1 -
                abs(dot)) * flight_speed**2 * drag_constant * flying_arrow.mass

            # Drag applies at the tail, torquing the arrow into its
            # flight direction.
            arrow_tail_position = Vec2d(-50, 0).rotated(flying_arrow.angle)
            flying_arrow.apply_impulse(
                drag_force_magnitude * -flight_direction, arrow_tail_position)

            flying_arrow.angular_velocity *= 0.9

        ### Clear screen
        screen.fill(pygame.color.THECOLORS["black"])

        ### Draw stuff
        draw(screen, space)

        # Power meter
        if pygame.mouse.get_pressed()[0]:
            current_time = pygame.time.get_ticks()
            diff = current_time - start_time
            power = max(min(diff, 1000), 10)
            h = power / 2
            pygame.draw.line(screen, pygame.color.THECOLORS["red"], (30, 550),
                             (30, 550 - h), 10)

        # Info and flip screen
        # NOTE(review): the help text mentions "Press R to reset" but no
        # R handler exists in the event loop above.
        screen.blit(
            font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]),
            (0, 0))
        screen.blit(
            font.render("Aim with mouse, hold LMB to powerup, release to fire",
                        1, THECOLORS["darkgrey"]), (5, height - 35))
        screen.blit(
            font.render("Press R to reset, ESC or Q to quit", 1,
                        THECOLORS["darkgrey"]), (5, height - 20))

        pygame.display.flip()

        ### Update physics: fixed 60 Hz step
        fps = 60
        dt = 1. / fps
        space.step(dt)

        clock.tick(fps)
Esempio n. 21
0
    def __init__(self, track, bot_type):
        """
        Handles simulation and GUI.

        Builds the pymunk space, checkpoints, the player car and any
        requested bots for the given track.

        :param track: Track object witch configures the scenario
        :param bot_type: Type of bot to be alongside user; can be set to
            None for no bot, 'player2' for a second player car,
            'parked_bots' for static obstacle cars, or any other value
            which is forwarded to _Bot as its behaviour type
        """

        # Initialize GUI if requested.
        # NOTE(review): `evaluate` is read as a module-level flag here --
        # confirm it is defined at file scope.
        if evaluate:
            pygame.init()
            self.screen = pygame.display.set_mode((width, height))
            self.clock = pygame.time.Clock()
            self.screen.set_alpha(None)

        # Initialize class variables
        self.last_checkpoint_distance = 0
        self.current_checkpoint_distance = 0
        self.current_checkpoint = 0
        self.track = track
        self.frame_count = 0
        self.on_track = True
        self.bot_type = bot_type

        self.global_track = Background(self.track.display_img_path, [0, 0])
        self.crashed_single_time = False
        self.max_steps = 3000
        self.crashed = False
        self.punctuation = 0
        self.environment = []
        self.force_switch = True

        # Physics stuff: top-down world, so no gravity.
        self.space = pymunk.Space()
        self.space.gravity = pymunk.Vec2d(0., 0.)

        self.checkpoints = []

        # One checkpoint body per pair of points defined by the track.
        for pair_of_points in track.checkpoints:
            self.checkpoints.append(self._create_checkpoint(pair_of_points))
        # Record steps.
        self.num_steps = 0

        # More GUI stuff: draw the initial frame.
        if evaluate:
            self.screen.fill(THECOLORS["black"])
            self.screen.blit(self.global_track.image, self.global_track.rect)
            draw(self.screen)
            pygame.display.flip()

        # Track variables: load the track mask and sample its pixels so
        # on/off-track checks can be done against the RGB list.
        self.image = Image.open(self.track.mask_img_path)
        self.image = self.image.resize((width, height))
        self.track_rgb = list(self.image.getdata())
        self.off_track_color = get_point_from_rgb_list(0, 0, self.track_rgb)

        # GUI stuff: cars only get a screen reference when evaluating.
        if evaluate:
            game_screen = self.screen
        else:
            game_screen = None

        self.parked_cars = []

        # Creates player car
        self.car1 = _Car(self.space,
                         self.track,
                         self.track.car1_position,
                         self.track_rgb,
                         self.off_track_color,
                         self.checkpoints,
                         1000,
                         car_image,
                         screen=game_screen)

        # Initialize bots
        if bot_type is not None:
            if bot_type == 'player2':
                # Second human-controlled car.
                self.car_bot = _Car(self.space,
                                    self.track,
                                    self.track.car2_position,
                                    self.track_rgb,
                                    self.off_track_color,
                                    self.checkpoints,
                                    1000,
                                    bot_image,
                                    screen=game_screen)
            elif bot_type == 'parked_bots':
                # Static obstacle cars, one per track obstacle position.
                for i in range(0, len(self.track.obstacles)):
                    self.parked_cars.append(
                        _ParkedBot(self.space,
                                   self.track,
                                   self.track.obstacles[i],
                                   self.track_rgb,
                                   self.off_track_color,
                                   self.checkpoints,
                                   1001,
                                   bot_image,
                                   screen=game_screen))
            else:
                # Any other value selects an autonomous bot of that type.
                self.car_bot = _Bot(self.space,
                                    self.track,
                                    self.track.car2_position,
                                    self.track_rgb,
                                    self.off_track_color,
                                    self.checkpoints,
                                    1000,
                                    bot_type,
                                    bot_image,
                                    screen=game_screen)

        # All drawable/updatable objects: parked cars plus the player.
        self.game_objects = [i for i in self.parked_cars]
        self.game_objects.append(self.car1)

        # Add bots to
        if bot_type is not None and bot_type != 'parked_bots':
            self.game_objects.append(self.car_bot)
    def frame_step(self, action):
        """Advance the car simulation one tick for the given action.

        Actions: 0 turns left, 1 turns right, 3 stops the car; any other
        value drives straight.  The reward combines a sonar-proximity
        term with a BLE-signal term (both shaped with normal PDFs), or a
        large penalty on crash.

        :param action: integer action code (0, 1, 3 or other)
        :return: ``(reward, state, print_stuff)`` where *state* is a
            (1, N) numpy array of sonar + BLE readings and *print_stuff*
            is a debug string (empty on crash frames)
        """

        global keyboard_in

        print_stuff = ''

        if action == 0:  # Turn left.
            self.car_body.angle -= .2
        elif action == 1:  # Turn right.
            self.car_body.angle += .2

        # Move obstacles.
        if self.num_steps % 100 == 0:
            self.move_obstacles()

        # Move cat.
        if self.num_steps % 5 == 0:
            self.move_cat()

        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        # Action 3 stops the car in place.
        if action == 3:
            self.car_body.velocity = 0*Vec2d(1, 0).rotated(self.car_body.angle)

        # Update the screen and stuff.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1./10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Get the current location and the readings there.
        x, y = self.car_body.position
        xC, yC = self.cat_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        # NOTE(review): verify_detected's return is unused; the call is
        # kept for its (presumed) drawing side effect -- confirm.
        color = self.verify_detected(x, y, self.car_body.angle)
        BLE_readings = self.get_BLE_readings(x,y,xC,yC,self.car_body.angle)
        state = np.array([readings+BLE_readings])

        # Keyboard overrides: 't' terminates the process, 'r' fakes a
        # crash by forcing all sonar readings to 1.
        if keyboard_in=='t':
            #os.system("kill -9 %d"%(os.getppid()))
            exit()
        elif keyboard_in=='r':
            print("you killed yourself")
            readings = [1,1,1,1,1]
            keyboard_in = ''

        else:
            pass
        # Set the reward.
        # Car crashed when any reading == 1
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = -50000
            self.recover_from_crash(driving_direction)
        else:
            # Sonar reward: only the middle (front) sonar contributes a
            # shaped term, plus a small sum-of-readings bonus.
            rtab_sonar[0]=0#(5*rc*mlab.normpdf(readings[0], 20, 2))**2
            rtab_sonar[1]=0#(10*rc*mlab.normpdf(readings[1], 15, 2))**2
            rtab_sonar[2]=(20*rc*mlab.normpdf(readings[2], 15, 2))**2
            rtab_sonar[3]=0#(10*rc*mlab.normpdf(readings[3], 15, 2))**2
            rtab_sonar[4]=0#(5*rc*mlab.normpdf(readings[4], 20, 2))**2
            rtab_sonar[5]=(( (int(self.sum_readings(readings))) /10))
            reward_sonar= sum(rtab_sonar)
            # BLE reward: cubed shaped terms around each beacon's target
            # signal strength.
            rtab_tabBLE[0]=((150*rc*mlab.normpdf(BLE_readings[0], 215, 100)))**3
            rtab_tabBLE[1]=((150*rc*mlab.normpdf(BLE_readings[1], 175, 100)))**3
            rtab_tabBLE[2]=((150*rc*mlab.normpdf(BLE_readings[2], 215, 100)))**3
            reward_BLE = sum(rtab_tabBLE)

            # BUGFIX: this assignment was duplicated on two consecutive
            # lines; the redundant copy has been removed.
            reward = reward_BLE+reward_sonar

            print_stuff = "\n\n reward BLE :"+str(reward_BLE)+"\n\n reward sonar :"+str(reward_sonar)+"\n\n reward : "+str(reward)+"\n\n RBLE details :"+str(rtab_tabBLE)+"\n\n RSONARS details :"+str(rtab_sonar)+"\n\n state:"+str(state)

        self.num_steps += 1

        return reward, state,print_stuff
Esempio n. 23
0
    def frame_step(self, cur_mode, c1_turn_action, c1_speed_action, c1_cur_speed,
                   c2_turn_action, c2_speed_action, c2_cur_speed, frame_ctr):
        """Advance the two-car simulation one frame.

        Turns/speeds the car(s) per the active model's actions, moves the
        obstacles, steps the physics, rebuilds the per-car states and
        computes the frame reward.  Car 2 is only simulated in PACK mode.

        Returns the c1/c2 state tuples, their current speeds and the reward.
        """
        # turn car based on current (active) model prediction
        if cur_mode in [TURN, SPEED, ACQUIRE, HUNT, PACK]:
            self.make_turn(c1_turn_action, self.c1_body)

        if cur_mode == PACK:
            self.make_turn(c2_turn_action, self.c2_body)

        # set speed based on active model prediction
        if cur_mode in [SPEED, HUNT, PACK]: # setting speed values directly see SPEEDS
            self.set_speed(c1_cur_speed, c1_speed_action, self.c1_body)

        if cur_mode == PACK:
            self.set_speed(c2_cur_speed, c2_speed_action, self.c2_body)

        # move obstacles
        if cur_mode in [TURN, SPEED, HUNT, PACK]:
            # slow obstacles, 20x slower than self
            if self.num_steps % 20 == 0:
                self.move_obstacles()

            # fast obstacles, 40x more stable than self
            if self.num_steps % 40 == 0:
                self.move_cats()

        # update the screen and surfaces
        screen.fill(pygame.color.THECOLORS[BACK_COLOR])

        if cur_mode in [ACQUIRE, HUNT, PACK]:

            # draw the path c1 has taken on the path grid
            pygame.draw.lines(path_grid, pygame.color.THECOLORS[C1_PATH_COLOR], True,
                              ((self.c1_last_x, height-self.c1_last_y),
                               (self.c1_cur_x, height-self.c1_cur_y)), 1)

            # draw the path c2 has taken
            if cur_mode == PACK:
                pygame.draw.lines(path_grid, pygame.color.THECOLORS[C2_PATH_COLOR], True,
                                  ((self.c2_last_x, height-self.c2_last_y),
                                   (self.c2_cur_x, height-self.c2_cur_y)), 1)

            # overlay the path, target surfaces on the screen
            screen.blit(path_grid, (0,0))
            screen.blit(target_grid, (0,0))

        # display results to screen
        draw(screen, self.space)
        self.space.step(1./10) # one pixel for every 10 SPEED
        if draw_screen:
            pygame.display.flip()

        # get readings, build states
        self.c1_last_x = self.c1_cur_x; self.c1_last_y = self.c1_cur_y
        self.c1_cur_x, self.c1_cur_y = self.c1_body.position

        c1_turn_state, c1_speed_state, c1_acquire_state, c1_hunt_state, \
            c1_pack_state, c1_min_sonar_dist, c1_move_efficiency = \
            self.build_states(cur_mode, self.c1_cur_x, self.c1_cur_y, self.c1_body,
                              c1_turn_action, c1_cur_speed, self.c1_last_tgt_dist,
                              self.c1_target_deltas, self.c1_obs_dists)

        # Fixed: default the c2 outputs so the return below cannot raise
        # NameError when cur_mode != PACK (previously they were unbound).
        c2_turn_state = c2_speed_state = c2_acquire_state = c2_hunt_state = None

        if cur_mode == PACK:
            self.c2_last_x = self.c2_cur_x; self.c2_last_y = self.c2_cur_y
            self.c2_cur_x, self.c2_cur_y = self.c2_body.position

            c2_turn_state, c2_speed_state, c2_acquire_state, c2_hunt_state, \
                c2_pack_state, c2_min_sonar_dist, c2_move_efficiency = \
                self.build_states(cur_mode, self.c2_cur_x, self.c2_cur_y, self.c2_body,
                                  c2_turn_action, c2_cur_speed, self.c2_last_tgt_dist,
                                  self.c2_target_deltas, self.c2_obs_dists)

        # calculate rewards based on training mode(s) in effect.
        # Fixed: the current positions live on self; the bare names
        # c1_cur_x / c1_cur_y were never defined and raised NameError.
        c1_turn_reward, c1_speed_reward, c1_acquire_reward, c1_hunt_reward = \
            self.calculate_reward(cur_mode, self.c1_cur_x, self.c1_cur_y, self.c1_body,
                                  c1_min_sonar_dist, c1_move_efficiency)

        reward = max([c1_turn_reward, c1_speed_reward, c1_acquire_reward, c1_hunt_reward])

        if cur_mode == PACK:
            c2_turn_reward, c2_speed_reward, c2_acquire_reward, c2_hunt_reward, c2_reward_pack = \
                self.calculate_reward(cur_mode, self.c2_cur_x, self.c2_cur_y, self.c2_body,
                                      c2_min_sonar_dist, c2_move_efficiency)

            # in PACK mode the reward is the joint hunt reward of both cars
            reward = c1_hunt_reward + c2_hunt_reward

        self.num_steps += 1
        clock.tick()

        return c1_turn_state, c1_speed_state, c1_acquire_state, c1_hunt_state, c1_cur_speed, \
            c2_turn_state, c2_speed_state, c2_acquire_state, c2_hunt_state, c2_cur_speed, reward
Esempio n. 24
0
def evaluate(genome, space, screen, fast_mode, start_x, start_vx, bot_startx):
    """Run one episode for *genome* and return (fitness, fast_mode).

    The genome's phenotype drives an NN_agent that must keep the ball in
    the air; fitness is the number of timesteps survived, zeroed if the
    ball ended below the floor.
    """
    clock = pygame.time.Clock()

    # Build the controlling network and the two actors.
    net = NEAT.NeuralNetwork()
    genome.BuildPhenotype(net)
    agent = NN_agent(space, net, bot_startx)
    ball = Ball(space, start_x, start_vx)

    space.add_collision_handler(collision_type_nn, collision_type_floor,
                                agent.touch_floor, None, None, agent.leave_floor)
    space.add_collision_handler(collision_type_ball, collision_type_floor,
                                ball.touch_floor, None, None, ball.leave_floor)

    step_count = 0
    height_sum = 0
    while step_count < max_timesteps:
        step_count += 1
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    exit()
                elif event.key == K_f:
                    fast_mode = not fast_mode
                elif not fast_mode:
                    # Manual nudges to the ball while rendering.
                    vx, vy = ball.body.velocity[0], ball.body.velocity[1]
                    if event.key == K_LEFT:
                        ball.body.velocity = (vx - 200, vy)
                    elif event.key == K_RIGHT:
                        ball.body.velocity = (vx + 200, vy)
                    elif event.key == K_UP:
                        ball.body.velocity = (vx, vy + 200)

        # Advance the physics at 50 Hz.
        space.step(1.0 / 50.0)

        # The network acts once every 20 timesteps.
        if step_count % 20 == 0:
            agent.interact(ball)
        height_sum += ball.body.position[1]

        # The episode ends as soon as the ball touches the floor.
        if not ball.in_air:
            break

        if not fast_mode:
            # Placeholder phenotype image (drawing call is disabled upstream).
            img = np.zeros((250, 250, 3), dtype=np.uint8)
            img += 10
            cv2.imshow("current best", img)
            cv2.waitKey(1)

            # Render the physics space.
            screen.fill(THECOLORS["black"])
            draw(screen, space)
            pygame.display.flip()
            clock.tick(50)

    # Fitness is the tick count the ball stayed up; zero if it fell through.
    fitness = 0 if ball.body.position[1] < 0 else step_count

    # Clean the actors out of the shared space for the next evaluation.
    space.remove(agent.shape, agent.body)
    space.remove(ball.shape, ball.body)
    return fitness, fast_mode
Esempio n. 25
0
    def __init__(self):
        """Build the game world: physics space, car, walls, obstacles and
        the buff/debuff/supply zones, plus the distance-to-goal bookkeeping
        used by the reward function.
        """
        # Global-ish.
        self.crashed = False
        self.drawoptions = draw(screen)
        # Physics stuff: zero gravity, top-down world.
        self.space = pymunk.Space()
        self.space.gravity = pymunk.Vec2d(0., 0.)
        #self.space.add_collision_handler(1, 1, post_solve=self.car_crashed)
        #pymunk.CollisionHandler(_handler=self.new_handle,post_solve=self.car_crashed,space = self.space)
        # Create the car.
        self.create_car(50, 50, 0)
        # Record steps.
        self.time = 0
        self.num_steps = 0
        # Fixed goal position the car must reach.
        self.goal = (400, 51)
        #self.create_obstacle(self.goal[0],self.goal[1],30)
        # screen.blit(car, ())
        screen.blit(blue, (0, 0))
        screen.blit(blue, (0, 410))
        x, y = self.car_body.position
        print(x, y)
        # Straight-line distance from the start position to the goal; used
        # as the baseline for progress-based rewards.
        self.init_heuristic = Vec2d(self.goal[0] - x,
                                    self.goal[1] - y).get_length()
        self.prev_goal_distance = self.init_heuristic
        self.car_body_prev_angle = 0
        # Create walls (four static segments around the screen edge).
        static = [
            pymunk.Segment(self.space.static_body, (0, 1), (0, height), 1),
            pymunk.Segment(self.space.static_body, (1, height),
                           (width, height), 1),
            pymunk.Segment(self.space.static_body, (width - 1, height),
                           (width - 1, 1), 1),
            pymunk.Segment(self.space.static_body, (1, 1), (width, 1), 1)
        ]
        for s in static:
            s.friction = 1.
            s.group = 1
            s.collision_type = 1
            s.color = THECOLORS['green']
        self.space.add(static)

        # Create some obstacles, semi-randomly.
        # We'll create three and they'll move around to prevent over-fitting.
        self.obstacles = []
        o1x = 50
        o1y = 12.5 + 385

        global prev_dist

        # Initial distance estimate shared with the reward code via a global.
        prev_dist = math.sqrt((self.goal[0] - 100) * (self.goal[0] - 100) +
                              (510 - self.goal[1] - 100) *
                              (510 - self.goal[1] - 100))

        # Rectangular wall-like obstacles laid out over the arena.
        self.obstacles.append(self.create_rect_obstacle(o1x, o1y, 100, 25))
        self.obstacles.append(self.create_rect_obstacle(400, 112.5, 100, 25))
        self.obstacles.append(self.create_rect_obstacle(400, o1y, 100, 25))
        self.obstacles.append(self.create_rect_obstacle(760, 112.5, 100, 25))

        self.obstacles.append(self.create_rect_obstacle(610, 255, 80, 25))
        self.obstacles.append(self.create_rect_obstacle(200, 255, 80, 25))

        self.obstacles.append(
            self.create_rect_obstacle(150 + 12.5, 50, 25, 100))
        self.obstacles.append(
            self.create_rect_obstacle(810 - (150 + 12.5), 510 - 50, 25, 100))

        self.obstacles.append(self.center_obstacle())

        # Zones (RoboMaster-style layout): the coordinates mirror each other
        # across the arena center.
        self.create_buff_debuff((0, 255, 0), 400, 51, 54, 48)  # buff zone
        # self.create_buff_debuff("green", 400, 51, 54, 48)# buff zone
        self.obstacles.append(self.create_debuff(190, 193.5, 54,
                                                 48))  #debuff zone
        self.create_buff_debuff((255, 255, 0), 50, 336, 54, 48)  #supply zone
        self.create_buff_debuff((255, 255, 0), 400, 510 - 51, 54,
                                48)  #supply zone
        self.obstacles.append(
            self.create_debuff(810 - 190, 510 - 193.5, 54, 48))  #debuff
        self.create_buff_debuff((0, 0, 255), 810 - 50, 510 - 336, 54,
                                48)  #enemy buff
Esempio n. 26
0
def evaluateNovelty(worldRef, genome, substrate, i, display = False, NNDisplay = False):
	"""Play one Keepaway game with *genome* and return its novelty score.

	The score is the minimum distance between this game's hold list and
	every previously archived novel solution; sufficiently novel runs are
	appended to the archive, and the best keeper score seen updates the
	global best genome.
	"""
	global novelSolutions
	global novelGenomes
	global best_genome_ever
	screen = pygame.display.set_mode((600, 600))
	space = pm.Space()
	clock = pygame.time.Clock()
	# Build the HyperNEAT phenotype and hand it to every keeper.
	net = NEAT.NeuralNetwork()
	genome.BuildHyperNEATPhenotype(net, substrate)
	worldRef.displayGraphics = True
	worldRef.resetGameForTraining()
	showDisplay = display
	for keeper in worldRef.keeperArray:
		keeper.receiveNN(net)

	while worldRef.isGameOver() == False:
		for event in pygame.event.get():
			if event.type == pygame.QUIT:
				gameExit = True
		worldRef._sendCalcReceiveDecision()
		worldRef._sendBirdsEyeView()

		for keeper in worldRef.keeperArray:
			keeper.receiveNN(net)
			keeper.decisionFlowChart("NEAT trying to move")
		for taker in worldRef.takerArray:
			taker.decisionFlowChart("NEAT trying to move")

		if(NNDisplay):
			# draw the phenotype
			img = np.zeros((450, 450, 3), dtype=np.uint8)
			img += 10
			NEAT.DrawPhenotype(img, (0, 0, 450, 450), net ,15, 3, substrate)
			cv2.imshow("current best", img)
			cv2.waitKey(1)

			## Draw stuff
			screen.fill(THECOLORS["black"])
			draw(screen, space)
			pygame.display.flip()
			clock.tick(10000)

		worldRef.commonFunctionality("hyperNEAT",showDisplay,True)
		worldRef.clock.tick(10000)

	currentHoldList = keeper.getHoldList()
	# Compare the current hold list against all archived novel solutions.
	noveltyScore = 0
	k = 100  # length-mismatch penalty weight
	if len(novelSolutions):
		# BUG FIX: noveltyMinScore was re-initialised inside the loop, so
		# only the last archived solution was ever considered for the
		# minimum; it must be initialised once, before the loop.
		noveltyMinScore = 9999
		for novelSolution in novelSolutions:
			lenMin = min([len(novelSolution),len(currentHoldList)])
			noveltyScore = 0
			# Manhattan distance over the overlapping prefix of hold points.
			for i in range(lenMin):
				noveltyScore += abs(currentHoldList[i][0] - novelSolution[i][0]) + abs(currentHoldList[i][1] - novelSolution[i][1])
			# NOTE(review): this term goes negative when the archived
			# solution is longer than the current one -- confirm intended.
			noveltyScore += k * (len(currentHoldList) - len(novelSolution))
			if noveltyScore < noveltyMinScore:
				noveltyMinScore = noveltyScore
		noveltyScore = noveltyMinScore
		if (noveltyScore > noveltyThreshold):
			novelSolutions.append(currentHoldList)
			novelGenomes.append(genome)
	else:
		# First run ever: archive unconditionally.
		novelSolutions.append(currentHoldList)
		novelGenomes.append(genome)
		noveltyScore = len(currentHoldList)

	keeper.clearHoldList()
	if worldRef.keeperScore > novelBestBest :
		best_genome_ever = genome
		setNovelBest(worldRef.keeperScore)
	return noveltyScore
	'''
Esempio n. 27
0
def evaluate(genome, space, screen, fast_mode, start_x, start_vx, bot_startx):
    """Evaluate *genome* on one ball-keeping episode.

    Builds the phenotype network, spawns the agent and the ball, runs the
    physics loop until the ball lands or max_timesteps elapse, and returns
    (fitness, fast_mode) where fitness is the surviving tick count (zero if
    the ball fell below the floor).
    """
    clock = pygame.time.Clock()

    # Phenotype network plus the two physical actors.
    net = NEAT.NeuralNetwork()
    genome.BuildPhenotype(net)
    agent = NN_agent(space, net, bot_startx)
    ball = Ball(space, start_x, start_vx)

    space.add_collision_handler(collision_type_nn, collision_type_floor,
                                agent.touch_floor, None, None,
                                agent.leave_floor)
    space.add_collision_handler(collision_type_ball, collision_type_floor,
                                ball.touch_floor, None, None, ball.leave_floor)

    ticks = 0
    height_accum = 0
    while ticks < max_timesteps:
        ticks += 1
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    exit()
                elif event.key == K_f:
                    fast_mode = not fast_mode
                elif not fast_mode:
                    # Interactive nudges to the ball while rendering.
                    vx = ball.body.velocity[0]
                    vy = ball.body.velocity[1]
                    if event.key == K_LEFT:
                        ball.body.velocity = (vx - 200, vy)
                    elif event.key == K_RIGHT:
                        ball.body.velocity = (vx + 200, vy)
                    elif event.key == K_UP:
                        ball.body.velocity = (vx, vy + 200)

        # Step the physics at 50 Hz.
        space.step(1.0 / 50.0)

        # The network only acts every 20th timestep.
        if ticks % 20 == 0:
            agent.interact(ball)
        height_accum += ball.body.position[1]

        # Stop as soon as the ball is no longer airborne.
        if not ball.in_air:
            break

        if not fast_mode:
            # Visualise the current phenotype and the physics space.
            cv2.imshow("current best", Draw(net))
            cv2.waitKey(1)

            screen.fill(THECOLORS["black"])
            draw(screen, space)
            pygame.display.flip()
            clock.tick(50)

    # Fitness = ticks survived; zeroed if the ball ended below the floor.
    fitness = 0 if ball.body.position[1] < 0 else ticks

    # Remove the actors so the space can be reused.
    space.remove(agent.shape, agent.body)
    space.remove(ball.shape, ball.body)

    return fitness, fast_mode
Esempio n. 28
0
    def frame_step(self, drone_id, turn_action, speed_action, pack_action, cur_speed, total_ctr, replay_ctr):
        """Advance one drone by one frame.

        Applies turn/speed/pack-heading actions (depending on the global
        `cur_mode`), moves obstacles, steps the physics, rebuilds states via
        build_states and scores the move via calc_rwd.  Obstacles and cats
        are introduced gradually during HUNT/PACK training.

        Returns (turn_state, avoid_state, acquire_state, hunt_state,
        drone_state, reward, cur_speed).
        """
        self.total_frame_ctr = total_ctr
        self.replay_frame_ctr = replay_ctr
        self.acquire_frame_ctr += 1

        # turn drone based on current (active) model prediction
        # NOTE(review): cur_mode is read from module scope, not a parameter.
        if cur_mode in [TURN, AVOID, ACQUIRE, HUNT, PACK]:
            self.set_turn(turn_action, drone_id)

        # set speed based on active model prediction
        if cur_mode in [AVOID, HUNT, PACK]: # setting speed values directly see SPEEDS
            cur_speed = self.set_speed(speed_action, drone_id)

        # effect move by applying speed and direction as vector on self
        driving_direction = Vec2d(1, 0).rotated(self.drones[drone_id].angle)
        self.drones[drone_id].velocity = cur_speed * driving_direction
        x, y = self.drones[drone_id].position

        # set heading adjustment based on pack model output
        if cur_mode == PACK:
            heading_adjust = self.set_pack_adjust(pack_action)[drone_id]
        else:
            heading_adjust = 0

        # move obstacles
        if cur_mode in [TURN, AVOID, HUNT, PACK]:
            # slow obstacles
            if self.total_frame_ctr % 20 == 0: # 20x slower than self
                self.move_obstacles()

            # fast obstacles
            if self.total_frame_ctr % 40 == 0: # 40 x more stable than self
                self.move_cats()

        # update the screen and surfaces; only drone 0 clears the frame
        if drone_id == 0:
            screen.fill(pygame.color.THECOLORS[BACK_COLOR])

        if cur_mode in [ACQUIRE, HUNT, PACK]:
            # draw the path drone has taken on the path grid
            # (skipped for the first frame or two while last_x/y warm up)
            if self.acquire_frame_ctr / NUM_DRONES > 1.5:
                pygame.draw.lines(path_grid, pygame.color.THECOLORS[PATH_COLOR], True,
                                  ((self.last_x[drone_id], height - self.last_y[drone_id]),
                                   (x, height - y)), 1)

            # if last drone, bind paths, targets to the screen
            if drone_id == (NUM_DRONES - 1):
                screen.blit(path_grid, (0,0))
                screen.blit(target_grid, (0,0))

        # if last drone, display screen
        #if(drone_id == (NUM_DRONES - 1)):
        draw(screen, self.space)
        self.space.step(1./10) # one pixel for every 10 SPEED
        if draw_screen:
            pygame.display.flip()

        # get readings, build states (position is re-read after the step)
        self.last_x[drone_id] = x; self.last_y[drone_id] = y
        x, y = self.drones[drone_id].position

        turn_state, avoid_state, acquire_state, hunt_state, drone_state, min_sonar_dist, avoid_move_efficiency, acquire_move_efficiency = \
            self.build_states(drone_id, turn_action, heading_adjust, cur_speed)

        # calc rewards based on training mode(s) in effect
        reward = self.calc_rwd(drone_id, min_sonar_dist, driving_direction, cur_speed, avoid_move_efficiency, acquire_move_efficiency)

        # introduce obstacles gradually for HUNT/PACK learning:
        # one new obstacle every 100 frames up to frame 600, then cats
        # every 100 frames up to frame 1000.
        if cur_mode in [HUNT, PACK] and drone_id == (NUM_DRONES - 1):
            if self.total_frame_ctr > 1 and \
                self.total_frame_ctr < 601 and \
                self.total_frame_ctr % 100 == 0:
                self.obstacles.append(self.create_obstacle(random.randint(200, width-200),
                                                           random.randint(140, height-140),
                                                           OBSTACLE_SIZES[int(self.total_frame_ctr / 100)-1]))
                self.target_radius -= 1

            if self.total_frame_ctr > 601 and \
                self.total_frame_ctr < 1001 and \
                self.total_frame_ctr % 100 == 0:

                self.cats.append(self.create_cat(width-500,height-350))

        #self.total_frame_ctr += 1
        clock.tick()

        return turn_state, avoid_state, acquire_state, hunt_state, drone_state, reward, cur_speed
Esempio n. 29
0
    def frame_step(self, action):
        """Render-and-sense frame with the car's motion disabled.

        All steering/physics updates are commented out; the frame only
        redraws the screen, samples the sonar and BLE sensors, and computes
        a Gaussian-bump reward gated by BLE-based detection.

        Returns (reward, state, print_stuff).
        """
        #if action == 0:  # Turn left.
        #    self.car_body.angle -= .2
        #elif action == 1:  # Turn right.
        #    self.car_body.angle += .2

        # Move obstacles.
        #if self.num_steps % 100 == 0:
            #self.move_obstacles()

        # Move cat.
        #if self.num_steps % 5 == 0:
            #self.move_cat()


        #driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        #self.car_body.velocity = 100 * driving_direction


        #if action == 3:
            #self.car_body.velocity = 0*Vec2d(1, 0).rotated(self.car_body.angle)

        # Update the screen and stuff.  Note the physics step is disabled,
        # so bodies never actually move between frames.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        #self.space.step(1./10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Get the current location and the readings there.
        x, y = self.car_body.position
        xC, yC = self.cat_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        BLE_readings = self.get_BLE_readings(x,y,xC,yC,self.car_body.angle)
        # Per-sensor detection gate derived from the BLE readings.
        color = self.detect_with_ble(BLE_readings)
        state = np.array([readings])

        # Set the reward.
        # Car crashed when any reading == 1
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = -500
            #self.recover_from_crash(driving_direction)
        else:
            ############        Reward max*376*    catClose*124      no cat*11  obectClose*
            #We use a gaussian function to set the reward to the maximum value if the user is bellow the car
            rtab_sonar[0]=(1700*mlab.normpdf(readings[0], 20, 2))*color[0]
            rtab_sonar[1]=(3000*mlab.normpdf(readings[1], 15, 2))*color[1]
            rtab_sonar[2]=(6000*mlab.normpdf(readings[2], 15, 2))*color[2]
            rtab_sonar[3]=(3000*mlab.normpdf(readings[3], 15, 2))*color[3]
            rtab_sonar[4]=(1700*mlab.normpdf(readings[4], 20, 2))*color[4]
            rtab_sonar[5]=( (int(self.sum_readings(readings))-5) / 10)
            reward = sum(rtab_sonar)
        #print data
        #print("\n reward:%d" % (reward))
        #print("reward BLE :",reward_BLE)
        #print("reward sonar :",reward_sonar)

        #print("detail reward:",'--',color[0],'--',color[1],'--',color[2],'--',color[3],'--',color[4])
        #print("RBLE details :",rtab_tabBLE)
        #print("RSONARS details :",rtab_sonar)
        #print("state:")
        #print(state)

        #"\n\n reward BLE :"+str(reward_BLE)+"\n\n reward sonar :"+str(reward_sonar)+
        print("car pos"+str([x,y]))
        # NOTE(review): rtab_tabBLE is printed below but never written in
        # this method -- it shows stale values from elsewhere; confirm.
        print_stuff = "\n\n reward sonar :"+str(reward)+"\n\n reward : "+str(reward)+"\n\n RBLE details :"+str(rtab_tabBLE)+"\n\n RSONARS details :"+str(rtab_sonar)+"\n\n state:"+str(state)+"\n\n ble detect:"+str(color)+"\n\n ble readings"+str(BLE_readings)


        self.num_steps += 1

        return reward, state, print_stuff
Esempio n. 30
0
    def frame_step(self, action):
        """Advance one frame with a variable-speed car.

        Actions: 0 turn left, 1 turn right, 2 accelerate (capped at 30),
        3 decelerate, anything else coast.  The reward combines action
        costs, crash penalties, a stall penalty and a speed bonus.

        Returns (reward, state) with state the normalized sonar readings.
        """
        reward = 0

        if action == 0:  # Turn left.
            self.car_body.angle -= .2
            # NOTE(review): -500 for a left turn vs -3 for a right turn is
            # highly asymmetric -- looks like a typo for -5/-3; confirm.
            reward -= 500
        elif action == 1:  # Turn right.
            self.car_body.angle += .2
            reward -= 3
        elif action == 2:
            if self.velocity < 30:
                self.velocity += 2
            reward += 5
        elif action == 3:
            self.velocity -= 2
            reward -= 3
        else:
            reward += 5


        #print("PREV: " + str(previous_location)
        # Remember where we were to detect a stalled car after the step.
        previous_location = self.car_body.position[0], self.car_body.position[1]

        #print(dist_from_prev)
        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = self.velocity * driving_direction

        # Update the screen and stuff.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1./10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Distance actually travelled during this physics step.
        dist_from_prev = math.hypot(previous_location[0] - self.car_body.position[0], previous_location[1] - self.car_body.position[1])

        # Get the current location and the readings there.
        x, y = self.car_body.position
        readings, walls = self.get_sonar_readings(x, y, self.car_body.angle)
        normalized_readings = [(x-20.0)/20.0 for x in readings]
        state = np.array([normalized_readings])

        # Set the reward.
        # Car crashed when any reading == 1
        if self.car_is_crashed2(walls):
            self.crashed = True
            reward = -1000
            self.recover_from_crash(driving_direction)
            print("CRASHED 2222222")
        elif self.car_is_crashed(readings):
            self.crashed = True
            reward = -500
            self.recover_from_crash(driving_direction)
            print("CRASHED 11111")
        elif dist_from_prev < 0.3:
            # Barely moved: penalize stalling.
            reward -= 300
        else:
            # Higher readings are better, so return the sum.
            reward += -5 + int(self.sum_readings(readings) / 10)
            if self.velocity<0:
                reward -= 10

        # Small bonus for maintaining speed.
        reward += self.velocity/5
        self.num_steps += 1

        return reward, state
    def frame_step(self, action):
        """Advance one frame.

        Actions: 1/2 hard left/right (.3 rad), 3/4 soft left/right (.1 rad);
        anything else drives straight ahead.

        Returns (reward, state) where state is a 1xN numpy array of sonar
        readings.
        """
        if action == 1:  # Hard left.
            self.car_body.angle -= .3
        elif action == 2:  # Hard right.
            self.car_body.angle += .3
        if action == 3:  # Soft left.
            self.car_body.angle -= .1
        elif action == 4:  # Soft right.
            self.car_body.angle += .1

        # Move obstacles.
        if self.num_steps % 100 == 0:
            self.move_obstacles()

        # Move cat.
        if self.num_steps % 5 == 0:
            self.move_cat()

        driving_direction = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * driving_direction

        # Update the screen and stuff.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1./10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Get the current location and the readings there.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        state = np.array([readings])

        # Set the reward.
        # Car crashed when any reading == 1
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = -500
            self.recover_from_crash(driving_direction)
        else:
            # Higher readings are better, so base the reward on the sum.
            # Hoisted: sum_readings was recomputed in every branch test.
            total = self.sum_readings(readings)
            reward = -35.0 + float(total)
            if total > 40:
                # Plenty of clearance: discourage unnecessary turning.
                if action == 1 or action == 2:
                        reward -= 0.5
                if action == 3 or action == 4:
                        reward -= 0.25
            elif total < 24 or readings[0] < 8 or readings[1] < 8 or readings[2] < 8:
                # Fixed: this branch used to come after the `< 30` test, so
                # its `< 24` condition was shadowed and could never fire;
                # the tighter (close-to-obstacle) case is now checked first.
                reward += (abs(readings[0] - readings[2])) / 14 + abs(readings[0] + readings[2]) / 16 - 4
                if action == 1 or action == 2:
                        reward += 2
                if action == 3 or action == 4:
                        reward += 1
            elif total < 30:
                # Getting close: encourage any turning action.
                if action == 1 or action == 2:
                        reward += 1
                if action == 3 or action == 4:
                        reward += 1


        self.num_steps += 1

        return reward, state
Esempio n. 32
0
    def frame_step(self, action):
        """One simulation frame: steer, step the physics, sense, and score.

        Actions: 0 turns left, 1 turns right, 3 stops the car.  Returns
        (reward, state) where state is a 1xN numpy array of sonar readings.
        """
        global keyboard_in

        # Steering.
        if action == 0:
            self.car_body.angle -= .2
        elif action == 1:
            self.car_body.angle += .2

        # Periodically shuffle the obstacles and the cat.
        if self.num_steps % 100 == 0:
            self.move_obstacles()
        if self.num_steps % 5 == 0:
            self.move_cat()

        heading = Vec2d(1, 0).rotated(self.car_body.angle)
        self.car_body.velocity = 100 * heading

        # Action 3 is a full stop.
        if action == 3:
            self.car_body.velocity = 0 * Vec2d(1, 0).rotated(self.car_body.angle)

        # Redraw and advance the physics one tick.
        screen.fill(THECOLORS["black"])
        draw(screen, self.space)
        self.space.step(1./10)
        if draw_screen:
            pygame.display.flip()
        clock.tick()

        # Sample the sensors at the car's new pose.
        x, y = self.car_body.position
        readings = self.get_sonar_readings(x, y, self.car_body.angle)
        color = self.verify_detected(x, y, self.car_body.angle)
        state = np.array([readings])

        # Keyboard overrides: 't' quits, 'r' simulates a crash.
        if keyboard_in == 't':
            exit()
        elif keyboard_in == 'r':
            print("you killed yourself")
            readings = [1, 1, 1, 1, 1]
            keyboard_in = ''

        # The car crashed when any reading == 1.
        if self.car_is_crashed(readings):
            self.crashed = True
            reward = -500
            self.recover_from_crash(heading)
        else:
            # Gaussian bumps centred on each sonar's sweet spot, gated by
            # the per-sensor detection flags, plus a clearance bonus.
            gauss = (1700 * mlab.normpdf(readings[0], 20, 2)) * color[0]
            gauss += (3000 * mlab.normpdf(readings[1], 15, 2)) * color[1]
            gauss += (6000 * mlab.normpdf(readings[2], 15, 2)) * color[2]
            gauss += (3000 * mlab.normpdf(readings[3], 15, 2)) * color[3]
            gauss += (1700 * mlab.normpdf(readings[4], 20, 2)) * color[4]
            reward = gauss + ((int(self.sum_readings(readings)) - 5) / 10)

        self.num_steps += 1

        return reward, state
Esempio n. 33
0
 def frame_step(self):
     """Render one frame: clear the screen, draw the space, advance the
     physics by 0.1 s and flip the display.  Returns nothing.
     """
     # TODO: no easy way to reset the angle marker after a collision.
     globals.screen.fill(THECOLORS["black"])
     draw(globals.screen, self.space)
     self.space.step(1. / 10)
     pygame.display.flip()
Esempio n. 34
0
    # Per-frame world maintenance (fragment of a game-loop function whose
    # definition lies outside this chunk): prune dead bullets/characters,
    # reset gun cooldowns, then render and step the physics.

    # prune a bullet if the bullet is outside the screen or has stopped moving
    for b in space.bullets[:]:  # iterate over a copy of the list
        if fabs(b.body.position.x - SCREEN_SIZE) > SCREEN_SIZE or \
           fabs(b.body.position.y - SCREEN_SIZE) > SCREEN_SIZE:
            space.remove_bullet(b)

        #print b.body.velocity.length
        # NOTE(review): if both conditions hold, remove_bullet is called
        # twice for the same bullet -- confirm it tolerates double removal.
        if b.body.velocity.length < BULLET_PRUNING_VELOCITY:
            space.remove_bullet(b)

    # prune a character if the character's head or torso is gone
    for c in space.characters[:]:  # iterate over a copy of the list
        if c.bodies[c.bodies_enum["HEAD"]] not in space.bodies or \
           c.bodies[c.bodies_enum["TORSO"]] not in space.bodies:
            space.remove_character(c)

    # NOTE(review): cooldown_timer is reset to now() every frame for every
    # armed character -- verify this is the intended cooldown behaviour.
    for c in space.characters:
        if c.gun:
            c.gun.cooldown_timer = time()

    # blit background
    screen.blit(background, (0, 0))
    # draw stuff
    pygame_util.draw(screen, space)
    # update physics
    space.step(STEP_TIME)
    # flip display
    pygame.display.flip()
    # maintain FPS
    clock.tick(FPS)
Esempio n. 35
0
def evaluate(x):
    """Evaluate one genome on the ball-chasing task.

    *x* is a (gid, genome, space, screen, fast_mode) tuple.  Fitness is
    10000 minus the closest agent-ball distance achieved; the function
    returns (fast_mode, gid, fitness, Behavior) where Behavior records the
    agent's final position.
    """
    gid, genome, space, screen, fast_mode = x
    clock = pygame.time.Clock()

    # Build the phenotype network and the two actors.
    net = NEAT.NeuralNetwork()
    genome.BuildPhenotype(net)
    agent = NN_agent(space, net)
    ball = Ball(space)

    closest = 1000000  # smallest agent-ball distance seen so far
    for _ in range(max_timesteps):
        for event in pygame.event.get():
            if event.type == QUIT:
                exit()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    exit()
                elif event.key == K_f:
                    fast_mode = not fast_mode

        # Step the physics at 50 Hz.
        space.step(1.0 / 50.0)

        # The network only acts every 5th timestep.
        if _ % 5 == 4 or True:
            pass
        # (network acts on multiples of 5; see below)

        if (_ + 1) % 5 == 0:
            agent.interact(ball)

        if not fast_mode:
            # Visualise the current phenotype and the physics space.
            cv2.imshow("current best", Draw(net))
            cv2.waitKey(1)

            screen.fill(THECOLORS["black"])
            draw(screen, space)
            pygame.display.flip()
            clock.tick(50)

        # Track the best (smallest) agent-ball distance.
        dist = np.sqrt((ball.body.position[0] - agent.body.position[0])**2 +
                       (ball.body.position[1] - agent.body.position[1])**2)
        closest = min(closest, dist)

    fitness = 10000 - closest

    # Draw the final frame so all genomes ever evaluated leave a trace.
    draw(screen, space)
    pygame.display.flip()

    # Remove the actors so the space can be reused.
    space.remove(agent.shape, agent.body)
    space.remove(ball.shape, ball.body)

    return fast_mode, gid, fitness, Behavior(agent.body.position[0],
                                             agent.body.position[1])