def startPrey():
    # Connect to the prey robot in the simulator and start its controller thread.
    prey_robot = robobo.SimulationRoboboPrey().connect(address='127.0.0.1',
                                                       port=19989)
    prey_controller = prey.Prey(robot=prey_robot, level=0)
    prey_controller.start()
    return prey_controller, prey_robot
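
# A matching teardown sketch for startPrey(); the name stopPrey is hypothetical,
# but the three calls mirror the cleanup sequence used in main() below.
def stopPrey(prey_controller, prey_robot):
    prey_controller.stop()     # signal the prey thread to stop
    prey_controller.join()     # wait for the thread to finish
    prey_robot.disconnect()    # release the simulator connection
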
def main():
    signal.signal(signal.SIGINT, terminate_program)

    rob = robobo.SimulationRobobo().connect(address='127.0.0.1', port=19997)

    rob.play_simulation()

    prey_robot = robobo.SimulationRoboboPrey().connect(address='127.0.0.1',
                                                       port=19989)

    prey_controller = prey.Prey(robot=prey_robot, level=2)

    prey_controller.start()

    for i in range(10):
        print("robobo is at {}".format(rob.position()))
        rob.move(5, 5, 2000)

    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()
    rob.stop_world()

    time.sleep(10)  # give the simulator time to finish stopping before restarting
    # rob.kill_connections()
    # rob = robobo.SimulationRobobo().connect(address='192.168.1.6', port=19997)
    rob.play_simulation()
    # prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.71', port=19989)
    # prey_controller = prey.Prey(robot=prey_robot, level=2, hardware=True)
    prey_robot = robobo.SimulationRoboboPrey().connect(address='127.0.0.1',
                                                       port=19989)
    prey_controller = prey.Prey(robot=prey_robot, level=2)

    prey_controller.start()
    for i in range(10):
        print("robobo is at {}".format(rob.position()))
        rob.move(5, 5, 2000)
    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()

    rob.stop_world()
    def __init__(self, initialPredatorPopulation, initialPreyPopulation):
        # Set up the pygame window, then spawn predators, prey, and food
        # at random positions with randomised traits.
        pygame.init()
        self.clock = pygame.time.Clock()
        self.surface = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
        self.predators = []
        self.prey = []
        self.food = []
        for i in range(initialPredatorPopulation):
            p = predator.Predator(
                (
                    predator.MAX_HEALTH, 
                    predator.VIEW_RADIUS,
                    predator.MAX_VELOCITY,
                    (random.randint(0, WIDTH), random.randint(0, HEIGHT)),
                    (random.uniform(-1, 1) * INIT_VELOCITY, random.uniform(-1, 1) * INIT_VELOCITY),
                    predator.COLOR,
                    predator.SIZE,
                ),
                predator.MAX_DETECTION_OF_PREY  * random.uniform(-1, 1),
                predator.MAX_ATTRACTION_TO_PREY * random.uniform(-1, 1),    
            )
            self.predators.append(p)

        for i in range(initialPreyPopulation):
            p = prey.Prey(
                (
                    prey.MAX_HEALTH, 
                    prey.VIEW_RADIUS,
                    prey.MAX_VELOCITY,
                    (random.randint(0, WIDTH), random.randint(0, HEIGHT)),
                    (random.uniform(-1, 1) * INIT_VELOCITY, random.uniform(-1, 1) * INIT_VELOCITY),
                    prey.COLOR,
                    prey.SIZE,
                ),
                prey.MAX_DETECTION_OF_FOOD       * random.uniform(-1, 1),
                prey.MAX_ATTRACTION_TO_FOOD      * random.uniform(-1, 1),
                prey.MAX_DETECTION_OF_PREDATOR   * random.uniform(-1, 1),
                prey.MAX_REPULSION_FROM_PREDATOR * random.uniform(-1, 1),
            )
            self.prey.append(p)

        for i in range(MAX_FOOD_SUPPLY):
            f = food.Food(
                (random.randint(0, WIDTH), random.randint(0, HEIGHT)),
                food.COLOR,
                food.SIZE
            )
            self.food.append(f)
        
        pygame.display.set_caption('SIMULATOR')
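
The __init__ above only builds the world; a frame loop like the following sketch would drive it (the per-agent draw method here is an assumption, it is not shown in the original):

    def run(self):
        # Redraw every agent each frame until the window is closed.
        running = True
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
            self.surface.fill((0, 0, 0))  # clear the previous frame
            for agent in self.predators + self.prey + self.food:
                agent.draw(self.surface)  # hypothetical per-agent draw method
            pygame.display.update()
            self.clock.tick(60)  # cap the loop at 60 frames per second
        pygame.quit()
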
Example #4
    def rl(alpha,
           gamma,
           epsilon,
           episodes,
           act_lim,
           param_tuples,
           qL=False,
           no_blocks=0):

        fitness = list()
        rewards = [0]

        for i in range(episodes):
            print('Episode ' + str(i))
            terminate = False

            if use_simulation:
                rob.play_simulation()
                prey_robot = robobo.SimulationRoboboPrey().connect(
                    address='172.17.0.1', port=19989)
            else:
                prey_robot = robobo.HardwareRobobo(camera=True).connect(
                    address="10.15.3.48")  # Connect to different ip?
            prey_controller = prey.Prey(robot=prey_robot, level=2)
            prey_controller.start()  # not sure yet whether this is needed for both simulation and hardware

            current_color_space = get_color_info()
            current_sensor_info = get_sensor_info('front_3')
            current_state = make_discrete(current_sensor_info, boundary_sensor,
                                          current_color_space,
                                          boundaries_color)

            if str(current_state) not in state_table.keys():
                state_table[str(current_state)] = [0 for _ in range(3)]

            action = epsilon_policy(current_state, epsilon)
            # current_collected_food = rob.collected_food() if use_simulation else 0
            # initialise state if it doesn't exist, else retrieve the current q-value
            x = 0
            while not terminate:

                take_action(action)
                # new_collected_food = rob.collected_food() if use_simulation else 0

                # Whole img extracted to get reward value
                # left, mid, right extracted to save state space accordingly

                new_color_space = get_color_info()
                new_sensor_info = get_sensor_info('front_3')
                new_state = make_discrete(new_sensor_info, boundary_sensor,
                                          new_color_space, boundaries_color)

                if str(new_state) not in state_table.keys():
                    state_table[str(new_state)] = [0 for _ in range(3)]

                new_action = epsilon_policy(new_state, epsilon)

                # Retrieve the max action if we use Q-Learning
                max_action = np.argmax(
                    state_table[str(new_state)]) if qL else new_action

                # Get reward
                r = get_reward(current_state, new_state, current_sensor_info,
                               new_sensor_info, action, new_action,
                               current_color_space, new_color_space)
                print("State and obtained Reward: ", new_state, r)

                # norm_r = normalize(r, -30, 20)
                #
                # if i != 0:
                #     fitness.append(no_blocks / i)
                # else:
                #     fitness.append(float(0))
                # # print(fitness)
                # if rewards:
                #     rewards.append(rewards[-1] + norm_r)
                # else:
                #     rewards.append(norm_r)

                # Update rule
                if not run_test:
                    # print('update')
                    state_table[str(current_state)][action] += \
                        alpha * (r + gamma * state_table[str(new_state)][max_action]
                                 - state_table[str(current_state)][action])

                # Stop episode if we get very close to an obstacle
                if (max(new_state[:3]) == 2 and max(new_state[3:]) != 2
                        and use_simulation) or x == act_lim - 1:
                    state_table[str(new_state)][new_action] = -10
                    terminate = True
                    print("done")
                    if not run_test:
                        print('writing json')
                        with open(q_table_file, 'w') as json_file:
                            json.dump(state_table, json_file)

                    if use_simulation:
                        print("stopping the simulation")
                        prey_controller.stop()
                        prey_controller.join()
                        prey_robot.disconnect()
                        rob.stop_world()
                        while not rob.is_sim_stopped():
                            print("waiting for the simulation to stop")
                        time.sleep(2)

                # update current state and action
                current_state = new_state
                current_sensor_info = new_sensor_info
                action = new_action
                current_color_space = new_color_space

                # increment action limit counter
                x += 1

        return fitness, rewards
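
rl() above relies on epsilon_policy for action selection, but the helper itself is not shown. A minimal epsilon-greedy sketch consistent with how it is called (state_table and the three-action layout come from the example; the body is an assumption):

import random
import numpy as np

def epsilon_policy(state, epsilon, n_actions=3):
    # Explore with probability epsilon, otherwise exploit the greedy action
    # from the Q-table row for this (stringified) state.
    if random.random() < epsilon:
        return random.randrange(n_actions)
    return int(np.argmax(state_table[str(state)]))
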
Example #5
def main():
    signal.signal(signal.SIGINT, terminate_program)

    # rob = robobo.HardwareRobobo(camera=True).connect(address="192.168.1.7")
    rob = robobo.SimulationRobobo().connect(address='192.168.1.6', port=19997)

    rob.play_simulation()

    # ALWAYS connect to the robot first, then start the simulation, and only then connect to the prey.
    # If this order is not respected, an error is raised (the cause is unknown).
    # If you use the provided scene, do not change the port number.
    # If you build your own scene, remember to update the prey port number in V-REP.
    prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.6', port=19989)
    #
    # rob.pause_simulation()

    # move and talk
    # rob.set_emotion('sad')

    # Initialise the Prey controller; it needs the robot it will move.
    # There are 5 difficulty levels, from 0 (super easy) to 4 (hard).
    # Select one via the level parameter of the constructor below; the default is 2.
    prey_controller = prey.Prey(robot=prey_robot, level=2)
    # Start the prey thread; this makes the prey move.
    prey_controller.start()
    # print("start")

    for i in range(10):
        print("robobo is at {}".format(rob.position()))
        rob.move(5, 5, 2000)
    #
    # print("robobo is at {}".format(rob.position()))
    # rob.sleep(1)

    # # Following code moves the phone stand
    # rob.set_phone_pan(343, 100)
    # rob.set_phone_tilt(109, 100)
    # time.sleep(1)
    # rob.set_phone_pan(11, 100)
    # rob.set_phone_tilt(26, 100)

    # rob.talk('Hi, my name is Robobo')
    # rob.sleep(1)
    # rob.set_emotion('happy')

    # Following code gets an image from the camera
    # image = rob.get_image_front()
    # cv2.imwrite("test_pictures.png",image)

    # time.sleep(0.1)

    # # IR reading
    # for i in range(1000000):
    #     print("ROB Irs: {}".format(np.log(np.array(rob.read_irs()))/10))
    #     time.sleep(0.1)

    # Stop the prey: stop() and join() end the controller thread,
    # and disconnect() then releases the prey robot.
    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()


    # pause the simulation and read the collected food
    # rob.pause_simulation()
    # print("Robobo collected {} food".format(rob.collected_food()))

    # Stopping the simulation resets the environment
    rob.stop_world()

    time.sleep(10)
    # rob.kill_connections()
    # rob = robobo.SimulationRobobo().connect(address='192.168.1.6', port=19997)
    rob.play_simulation()
    # prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.71', port=19989)
    # prey_controller = prey.Prey(robot=prey_robot, level=2, hardware=True)
    prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.6', port=19989)
    prey_controller = prey.Prey(robot=prey_robot, level=2)

    prey_controller.start()
    for i in range(10):
        print("robobo is at {}".format(rob.position()))
        rob.move(5, 5, 2000)
    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()

    rob.stop_world()
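
main() registers terminate_program as the SIGINT handler without showing it; a minimal sketch of such a handler (the body is an assumption):

import sys

def terminate_program(signal_number, frame):
    # Invoked on Ctrl-C; exit so the script stops driving the simulator.
    print("Ctrl-C received, terminating program")
    sys.exit(1)
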
Example #6
    def rl(alpha, gamma, epsilon, episodes, act_lim, exp, qL=False):

        fitness = list()
        rewards = [0]

        for i in range(episodes):
            print('Episode ' + str(i))
            terminate = False
            if use_simulation:
                rob.play_simulation()
                prey_robot = robobo.SimulationRoboboPrey().connect(
                    address=virtual_ip, port=19989)
            else:
                prey_robot = robobo.HardwareRobobo().connect(
                    address=robot_prey)
            prey_controller = prey.Prey(
                robot=prey_robot, level=2) if use_simulation else prey.Prey(
                    robot=prey_robot, level=4)
            prey_controller.start()
            current_color_space = get_color_info()
            current_sensor_info = get_sensor_info('front_3')
            current_state = make_discrete(current_sensor_info, boundary_sensor,
                                          current_color_space,
                                          boundaries_color)

            if str(current_state) not in state_table.keys():
                state_table[str(current_state)] = [0 for _ in range(3)]

            action = epsilon_policy(current_state, epsilon)
            x = 0
            while not terminate:

                take_action(action)
                # new_collected_food = rob.collected_food() if use_simulation else 0

                # Whole img extracted to get reward value
                # left, mid, right extracted to save state space accordingly

                new_color_space = get_color_info()
                new_sensor_info = get_sensor_info('front_3')
                new_state = make_discrete(new_sensor_info, boundary_sensor,
                                          new_color_space, boundaries_color)

                if str(new_state) not in state_table.keys():
                    state_table[str(new_state)] = [0 for _ in range(3)]

                new_action = epsilon_policy(new_state, epsilon)

                # Retrieve the max action if we use Q-Learning
                max_action = np.argmax(
                    state_table[str(new_state)]) if qL else new_action

                # Get reward
                r = get_reward(current_state, new_state, current_sensor_info,
                               new_sensor_info, action, new_action,
                               current_color_space, new_color_space)
                print("State and obtained Reward: ", new_state, r)

                norm_r = normalize(r, MIN_REWARD, MAX_REWARD)
                norm_steps = normalize(x, MIN_TIMESTEPS + 0.01, MAX_TIMESTEPS)
                fitness.append(norm_r / norm_steps)

                # Update rule
                if not run_test:
                    # print('update')
                    state_table[str(current_state)][action] += \
                        alpha * (r + gamma * state_table[str(new_state)][max_action]
                                 - state_table[str(current_state)][action])

                # Stop episode if we get very close to an obstacle
                if (max(new_state[:3]) == 2 and max(new_state[3:]) != 2
                        and use_simulation) or x == act_lim - 1:
                    # state_table[str(new_state)][new_action] = -10
                    terminate = True
                    print("done")
                    if not run_test:
                        print('writing json')
                        with open(
                                './src/task3_results/state_table_e' +
                                str(epsilon) + '_exp_' + str(exp) + '.json',
                                'w') as json_file:  # 'w', not 'wb': json.dump writes text
                            json.dump(state_table, json_file)

                    if use_simulation:
                        print("stopping the simulation")
                        prey_controller.stop()
                        prey_controller.join()
                        prey_robot.disconnect()
                        rob.stop_world()
                        while not rob.is_sim_stopped():
                            print("waiting for the simulation to stop")
                        time.sleep(2)

                # update current state and action
                current_state = new_state
                current_sensor_info = new_sensor_info
                action = new_action
                current_color_space = new_color_space

                # increment action limit counter
                x += 1

        return fitness, rewards
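
# normalize() is called above as plain min-max scaling but is not defined in this
# example; a minimal sketch of such a helper (the body is an assumption):
def normalize(value, low, high):
    # Map value from [low, high] onto [0, 1].
    return (value - low) / (high - low)
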
def main():
    signal.signal(signal.SIGINT, terminate_program)

    # rob = robobo.HardwareRobobo(camera=True).connect(address="192.168.1.7")
    rob = robobo.SimulationRobobo(number='').connect(address='127.0.0.1',
                                                     port=19997)
    rob.play_simulation()

    rob.set_phone_tilt(0.8, 50)
    image = rob.get_image_front()

    # mask = cv2.inRange(image, (0, 140, 0), (20, 255, 20))
    mask = cv2.inRange(image, (0, 0, 80), (60, 60, 255))
    # mask = cv2.bitwise_not(mask, mask)
    # masked = cv2.bitwise_and(image, image, mask=mask)
    cv2.imshow('9', mask)
    cv2.imshow('1', image)
    cv2.waitKey(0)  # waits until a key is pressed
    cv2.destroyAllWindows()
    exit()  # NOTE: this debug exit makes everything below unreachable
    # prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.71', port=19989)
    # prey_controller = prey.Prey(robot=prey_robot, level=2, hardware=True)
    prey_robot = robobo.SimulationRoboboPrey('#0').connect(address='127.0.0.1',
                                                           port=19989)
    prey_controller = prey.Prey(robot=prey_robot, level=2)

    prey_controller.start()
    for i in range(5000):
        print("robobo is at {}".format(rob.position()))
        rob.move(90, 90, millis=200)
    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()

    rob.stop_world()
    # move and talk
    # rob.set_emotion('sad')

    # Initialise the Prey controller; it needs the robot it will move.
    # There are 5 difficulty levels, from 0 (super easy) to 4 (hard).
    # Select one via the level parameter of the constructor below; the default is 2.
    prey_controller = prey.Prey(robot=prey_robot, level=2)
    # Start the prey thread; this makes the prey move.
    prey_controller.start()
    # print("start")

    for i in range(10):
        print("robobo is at {}".format(rob.position()))
        rob.move(5, 5, 2000)
    #
    # print("robobo is at {}".format(rob.position()))
    # rob.sleep(1)

    # # Following code moves the phone stand
    # rob.set_phone_pan(343, 100)
    # rob.set_phone_tilt(109, 100)
    # time.sleep(1)
    # rob.set_phone_pan(11, 100)
    # rob.set_phone_tilt(26, 100)

    # rob.talk('Hi, my name is Robobo')
    # rob.sleep(1)
    # rob.set_emotion('happy')

    # Following code gets an image from the camera
    # image = rob.get_image_front()
    # cv2.imwrite("test_pictures.png",image)

    # time.sleep(0.1)

    # # IR reading
    # for i in range(1000000):
    #     print("ROB Irs: {}".format(np.log(np.array(rob.read_irs()))/10))
    #     time.sleep(0.1)

    # Stop the prey: stop() and join() end the controller thread,
    # and disconnect() then releases the prey robot.
    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()

    # pause the simulation and read the collected food
    # rob.pause_simulation()
    # print("Robobo collected {} food".format(rob.collected_food()))

    # Stopping the simulation resets the environment
    rob.stop_world()

    time.sleep(10)
    # rob.kill_connections()
    # rob = robobo.SimulationRobobo().connect(address='192.168.1.6', port=19997)
    rob.play_simulation()

    rob.set_phone_tilt(0.8, 50)
    # prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.71', port=19989)
    # prey_controller = prey.Prey(robot=prey_robot, level=2, hardware=True)
    prey_robot = robobo.SimulationRoboboPrey().connect(address='192.168.1.6',
                                                       port=19989)
    prey_controller = prey.Prey(robot=prey_robot, level=2)

    prey_controller.start()
    for i in range(10):
        print("robobo is at {}".format(rob.position()))
        rob.move(90, 90, millis=200)
    prey_controller.stop()
    prey_controller.join()
    prey_robot.disconnect()

    rob.stop_world()
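
Both rl() variants call make_discrete to bin the continuous sensor and colour readings into the small state space that state_table is keyed on. One plausible sketch, assuming boundary_sensor and boundaries_color are (low, high) threshold pairs (the real helper is not shown):

def make_discrete(sensor_info, boundary_sensor, color_space, boundaries_color):
    # Bin each reading into {0, 1, 2}: 0 = far/absent, 2 = very close/strong.
    state = []
    for s in sensor_info:
        state.append(0 if s < boundary_sensor[0]
                     else 1 if s < boundary_sensor[1] else 2)
    for c in color_space:
        state.append(0 if c < boundaries_color[0]
                     else 1 if c < boundaries_color[1] else 2)
    return state
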
Example #8
File: util.py  Project: jxcl/flock_game
def populate_prey(l, config):
    for x in range(config["num_birds"]):
        b = prey.Prey(random_position(config),
                      (random.randint(1, 5), random.randint(1, 5)),
                      x, config)
        l.append(b)
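
A hypothetical usage sketch; only the "num_birds" key is visible in the code above, and whatever keys random_position(config) reads are assumptions:

config = {"num_birds": 25}  # plus the keys random_position(config) expects
flock = []
populate_prey(flock, config)
print("spawned {} prey".format(len(flock)))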