Example #1
0
from math import cos, sin


def calculate_new_pose(pose, wheel_speeds, dt):
    radius = 26  # half the distance between the wheels (so 2 * radius is the track width)
    new_pose = Pose(pose.x, pose.y, pose.theta)
    vl = wheel_speeds[0]  # left wheel speed
    vr = wheel_speeds[1]  # right wheel speed
    # Differential-drive update: translate along the current heading and rotate
    # in proportion to the wheel speed difference, integrated over the time step dt.
    new_pose.x = pose.x + 0.5 * (vl + vr) * cos(pose.theta) * dt
    new_pose.y = pose.y + 0.5 * (vl + vr) * sin(pose.theta) * dt
    new_pose.theta = pose.theta - 0.5 * (vl - vr) / (2 * radius) * dt
    return new_pose
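A minimal usage sketch of the update above (hedged: the Pose container below is a hypothetical stand-in with only x, y and theta fields, since the project's own Pose class is not shown here):

class Pose:
    # Hypothetical minimal pose container, for illustration only.
    def __init__(self, x, y, theta):
        self.x, self.y, self.theta = x, y, theta

pose = Pose(0.0, 0.0, 0.0)
dt = 0.01
for _ in range(100):
    # Unequal wheel speeds make the robot follow a curved path.
    pose = calculate_new_pose(pose, (1.0, 1.2), dt)
print(pose.x, pose.y, pose.theta)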
Example #2
0
def load_kitty_poses(location, pose_file):
    poses = SortedDict()
    filename = '{}/poses/{}.txt'.format(location, pose_file)
    with open(filename, 'r') as fp:
        # One pose per line, keyed by its line index.
        for pose_id, line in enumerate(fp):
            poses[pose_id] = Pose(line, location, pose_file)

    return poses
Example #3
0
    def reset(self, is_learning=True):
        """
        Resets the simulation.

        :param is_learning: if the robot is learning in this episode.
        :type is_learning: bool.
        """
        start = self.track.get_initial_point()
        self.line_follower.reset(Pose(start.x, start.y, 0.0), is_learning)
        self.point_list = []
Example #4
0
    def reset(self, controller_params=None):
        """
        Resets the simulation.
        Changing the controller parameters is optional. If no controller parameters are passed when
        calling this method, the previous controller parameters are kept.

        :param controller_params: new controller parameters.
        :type controller_params: Params.
        """
        start = self.track.get_initial_point()
        self.line_follower.reset(Pose(start.x, start.y, 0.0), controller_params)
        self.point_list = []
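A brief usage note: the two reset variants above come from different versions of the simulation class (one for a learning setting, one for controller tuning), so the calls below are illustrative only and assume an already constructed simulation object (hypothetical variable names):

simulation.reset(is_learning=True)               # learning-episode variant
simulation.reset(controller_params=new_params)   # controller-tuning variant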
Example #5
0
    def __init__(self, line_follower, track):
        """
        Creates the simulation.

        :param line_follower: the line follower robot.
        :type line_follower: LineFollower.
        :param track: the line track.
        :type track: Track.
        """
        self.line_follower = line_follower
        self.track = track
        start = self.track.get_initial_point()
        self.line_follower.reset(Pose(start.x, start.y, 0.0))
        self.point_list = []  # To draw the robot's path
        # Defining the sprite parameters
        sprite_params = Params()
        sprite_params.wheel_thickness = 0.01
        sprite_params.sensor_thickness = 0.02
        sprite_params.wheel_radius = line_follower.wheel_radius
        sprite_params.wheels_distance = line_follower.wheels_distance
        sprite_params.sensor_offset = line_follower.sensor_offset
        sprite_params.array_width = line_follower.line_sensor.array_width
        # Creating the robot sprite
        self.sprite = RobotSprite(sprite_params)
Example #6
0
import pygame

from constants import FREQUENCY
from roomba import Roomba
from simulation import *
from state_machine import FiniteStateMachine, MoveForwardState

pygame.init()

window = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

pygame.display.set_caption("Lab 1 - Roomba Finite State Machine")

clock = pygame.time.Clock()

behavior = FiniteStateMachine(MoveForwardState())
# behavior = RoombaBehaviorTree()
pose = Pose(PIX2M * SCREEN_WIDTH / 2.0, PIX2M * SCREEN_HEIGHT / 2.0, 0.0)
roomba = Roomba(pose, 1.0, 2.0, 0.34 / 2.0, behavior)
simulation = Simulation(roomba)

run = True

while run:
    clock.tick(FREQUENCY)

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False

    simulation.update()
    draw(simulation, window)
Example #7
0
def dantam_distract(env, n_obj):
    env.Load(os.path.join(ENVIRONMENTS_DIR, 'empty.xml'))

    m, n = 3, 3
    side_dim = .07
    height_dim = .1
    box_dims = (side_dim, side_dim, height_dim)
    separation = (side_dim, side_dim)

    coordinates = list(product(range(m), range(n)))
    assert n_obj <= len(coordinates)
    obj_coordinates = sample(coordinates, n_obj)

    length = m * (box_dims[0] + separation[0])
    width = n * (box_dims[1] + separation[1])
    height = .7
    table = box_body(env, 'table', length, width, height, color=TAN)
    set_pose(table, pose_from_quat_point(unit_quat(), np.array([0, 0, 0])))
    env.Add(table)

    robot = env.GetRobots()[0]
    set_manipulator_conf(robot.GetManipulator('leftarm'), TOP_HOLDING_LEFT_ARM)
    open_gripper(robot.GetManipulator('leftarm'))
    set_manipulator_conf(robot.GetManipulator('rightarm'),
                         mirror_arm_config(robot, REST_LEFT_ARM))
    close_gripper(robot.GetManipulator('rightarm'))
    robot.SetDOFValues([.15], [robot.GetJointIndex('torso_lift_joint')])
    set_base_conf(robot, (-.75, .2, -math.pi / 2))

    poses = []
    z = height + SURFACE_Z_OFFSET
    for r in range(m):
        row = []
        x = -length / 2 + (r + .5) * (box_dims[0] + separation[0])
        for c in range(n):
            y = -width / 2 + (c + .5) * (box_dims[1] + separation[1])
            row.append(Pose(pose_from_quat_point(
                unit_quat(), np.array([x, y, z]))))
        poses.append(row)

    initial_poses = {}
    goal_poses = {}
    for i, (r, c) in enumerate(obj_coordinates):
        row_color = np.zeros(4)
        row_color[2 - r] = 1.
        if i == 0:
            name = 'goal%d-%d' % (r, c)
            color = BLUE
            goal_poses[name] = poses[m // 2][n // 2]
        else:
            name = 'block%d-%d' % (r, c)
            color = RED
        initial_poses[name] = poses[r][c]
        obj = box_body(env, name, *box_dims, color=color)
        set_pose(obj, poses[r][c].value)
        env.Add(obj)

    known_poses = list(flatten(poses))
    object_names = list(initial_poses.keys())
    known_grasps = list(
        map(Grasp, top_grasps(env.GetKinBody(object_names[0]))))

    return ManipulationProblem(object_names=object_names, table_names=[table.GetName()],
                               goal_poses=goal_poses, initial_poses=initial_poses, known_poses=known_poses, known_grasps=known_grasps)
Example #8
0
# Defining PID controller parameters
controller_params = Params()
controller_params.kp = 50.0
controller_params.ki = 0.0
controller_params.kd = 3.0
# Defining robot parameters
robot_params = Params()
robot_params.sensor_offset = 0.05
robot_params.max_wheel_speed = 45.0
robot_params.wheel_radius = 0.02
robot_params.wheels_distance = 0.05
robot_params.wheel_bandwidth = 10.0 * 2.0 * pi
# Defining line sensor parameters
sensor_params = Params()
sensor_params.sensor_range = 0.015
sensor_params.num_sensors = 7
sensor_params.array_width = 0.06
line_follower = LineFollower(Pose(0.5, 0.5, 45.0 * pi / 180.0),
                             controller_params, robot_params, sensor_params)

# Defining PSO hyperparameters
hyperparams = Params()
hyperparams.num_particles = 40
hyperparams.inertia_weight = 0.5
hyperparams.cognitive_parameter = 0.6
hyperparams.social_parameter = 0.8
lower_bound = np.array([0.0, 10.0, 0.0, 0.0])
upper_bound = np.array([0.9, 200.0, 1300.0, 30.0])
pso = ParticleSwarmOptimization(hyperparams, lower_bound, upper_bound)

# Creating track
# Switch to the simple track if you are having trouble getting the robot to learn on the complex track
track = create_complex_track()  # create_simple_track()
Example #9
0
    coverage_grid = OccupancyGrid()
    setupOccGrid(coverage_grid, vision_sensor)

    obstacle_mask = coverage_grid.grid == 0
    mask = obstacle_mask

    # Set obstacles to -1 for coverage calculations
    coverage_grid.grid *= -1

    coverage_grid.setup_drawing()

    for (p_x, p_y), layer in path:

        # Move the robot along the path
        wx, wy = occ_grid.mapToWorld(p_x, p_y)
        pose = Pose(wx, wy, -layer * math.pi / 8)
        set2DPose(robot, pose)

        # Visualization
        new_coverage = OccupancyGrid()
        setupOccGrid(new_coverage, vision_sensor)
        prev_grid = np.array(coverage_grid.grid, copy=True)
        coverage_grid.grid[mask] += new_coverage.grid[mask]

        new_mask = new_coverage.grid == 0
        mask = np.logical_and(obstacle_mask, new_mask)
        coverage_grid.draw()

        pr.step()

    # End Simulations
Example #10
0
# Defining robot parameters
robot_params = Params()
robot_params.sensor_offset = 0.05
robot_params.max_wheel_speed = 45.0
robot_params.wheel_radius = 0.02
robot_params.wheels_distance = 0.05
robot_params.wheel_bandwidth = 10.0 * 2.0 * pi
# Defining line sensor parameters
sensor_params = Params()
sensor_params.sensor_range = 0.015
sensor_params.num_sensors = 7
sensor_params.array_width = 0.06
# Defining controller params (including learning algorithm)
linear_speed = 0.7
rl_algorithm = configure_rl_algorithm()
line_follower = LineFollower(Pose(0.0, 0.0, 0.0), rl_algorithm, linear_speed, robot_params, sensor_params)

# Creating track
# Switch to the simple track if you are having trouble getting the robot to learn on the complex track
track = create_complex_track()  # create_simple_track()

# Creating the simulation
simulation = Simulation(line_follower, track)

# Initializing pygame
pygame.init()
window = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Lab 12 - Model-Free RL Algorithms")
clock = pygame.time.Clock()
font = pygame.font.SysFont('Arial', 20, True)
Example #11
0
  coverage_grid = OccupancyGrid()
  setupOccGrid(coverage_grid, vision_sensor)

  obstacle_mask = coverage_grid.grid == 0
  mask = obstacle_mask

  # Set obstacles to -1 for coverage calculations
  coverage_grid.grid *= -1

  coverage_grid.setup_drawing()

  # TODO: Rotational interpolation
  for p in path:

    # Move the robot along the path
    wx, wy = occ_grid.mapToWorld(p[0], p[1])
    pose = Pose(wx, wy, math.radians(p[2] * 90))
    # print(wx, wy)
    set2DPose(robot, pose)
    pr.step()
    # time.sleep(0.01)

    # Visualization
    new_coverage = OccupancyGrid()
    setupOccGrid(new_coverage, vision_sensor)
    prev_grid = np.array(coverage_grid.grid, copy=True)
    coverage_grid.grid[mask] += new_coverage.grid[mask]
    
    new_mask = new_coverage.grid == 0
    mask = np.logical_and(obstacle_mask, new_mask)

    coverage_grid.draw()
Example #12
0
# Defining PID controller parameters
controller_params = Params()
controller_params.kp = 50.0
controller_params.ki = 0.0
controller_params.kd = 3.0
# Defining robot parameters
robot_params = Params()
robot_params.sensor_offset = 0.05
robot_params.max_wheel_speed = 45.0
robot_params.wheel_radius = 0.02
robot_params.wheels_distance = 0.05
robot_params.wheel_bandwidth = 10.0 * 2.0 * pi
# Defining line sensor parameters
sensor_params = Params()
sensor_params.sensor_range = 0.015
sensor_params.num_sensors = 7
sensor_params.array_width = 0.06
line_follower = LineFollower(Pose(0.5, 0.5, 45.0 * pi / 180.0), controller_params, robot_params, sensor_params)

# Defining PSO hyperparameters
hyperparams = Params()
hyperparams.num_particles = 40
hyperparams.inertia_weight = 0.7
hyperparams.cognitive_parameter = 0.6
hyperparams.social_parameter = 0.8
lower_bound = np.array([0.0, 10.0, 0.0, 0.0])
upper_bound = np.array([0.9, 200.0, 1300.0, 30.0])
pso = ParticleSwarmOptimization(hyperparams, lower_bound, upper_bound)

# Creating track
# Switch to the simple track if you are having trouble getting the robot to learn on the complex track
track = create_complex_track()  # create_simple_track()
Example #13
0
# Defining robot parameters
robot_params = Params()
robot_params.sensor_offset = 0.05
robot_params.max_wheel_speed = 45.0
robot_params.wheel_radius = 0.02
robot_params.wheels_distance = 0.05
robot_params.wheel_bandwidth = 10.0 * 2.0 * pi
# Defining line sensor parameters
sensor_params = Params()
sensor_params.sensor_range = 0.015
sensor_params.num_sensors = 7
sensor_params.array_width = 0.06
# Defining controller params (including learning algorithm)
linear_speed = 0.7
rl_algorithm = configure_rl_algorithm()
line_follower = LineFollower(Pose(0.0, 0.0, 0.0), rl_algorithm, linear_speed,
                             robot_params, sensor_params)

# Creating track
# Switch to the simple track if you are having trouble getting the robot to learn on the complex track
track = create_complex_track()  # create_simple_track()

# Creating the simulation
simulation = Simulation(line_follower, track)

# Initializing pygame
pygame.init()
window = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
pygame.display.set_caption("Lab 12 - Model-Free RL Algorithms")
clock = pygame.time.Clock()
font = pygame.font.SysFont('Arial', 20, True)