Example #1
    def __update_sensor_lines(self):
        # Sensor rays start at the robot's rim (radius) and end sensor_len
        # away from the centre, in the directions given by sensor_angles.
        sensor_start_points = utils.calc_coordinates(self.sensor_angles,
                                                     np.array([[self.robot.radius]]),
                                                     self.robot_positions)
        sensor_end_points = utils.calc_coordinates(self.sensor_angles,
                                                   np.array([[self.robot.sensor_len]]),
                                                   self.robot_positions)
        self.sensor_lines = utils.calc_line_coordinates(sensor_start_points, sensor_end_points)
        return self.sensor_lines
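
A hypothetical sketch of what calc_line_coordinates might do with these start and end points: sample evenly spaced points along each sensor ray. The helper name and the sampling strategy below are assumptions; the real utils.calc_line_coordinates may rasterise the segments differently (for example with Bresenham's algorithm).

import numpy as np

def calc_line_coordinates_sketch(start_points, end_points, num=30):
    # np.linspace broadcasts over array endpoints and returns an array of
    # shape (num, *start_points.shape): num points along every segment.
    return np.linspace(start_points, end_points, num)
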
Example #2
    def test_calc_coordinates(self):
        # arrange
        robot1_sensor_angles = np.array([
            math.radians(-60),  # -1.0471975511965976
            math.radians(-30),  # -0.5235987755982988
            math.radians(0),  # 0.0
            math.radians(30),  # 0.5235987755982988
            math.radians(60)  # 1.0471975511965976
        ])
        robot2_sensor_angles = np.array([
            math.radians(-90),  # -1.5707963267948966
            math.radians(-60),
            math.radians(-30),
            math.radians(0),
            math.radians(30)
        ])
        sensor_angles = np.column_stack(
            (robot1_sensor_angles.T, robot2_sensor_angles.T))  # shape (5, 2): column j holds robot j's angles

        hypotenuse = 1  # let's keep it simple: unit length for every sensor

        source1 = np.array([1, 0]).T  # robot 1 at (x=1, y=0)
        source2 = np.array([0, 2]).T  # robot 2 at (x=0, y=2)

        source = np.column_stack((source1, source2))  # shape (2, 2), one column per robot

        # act
        xy = utils.calc_coordinates(sensor_angles, hypotenuse, source)

        # assert
        self.assertEqual(xy.shape, (5, 2, 2))
        self.assertEqual(xy[2, 0, 0], 2)  # robot 1, zero angle: x = 1 + cos(0) * 1 == 2
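
Judging from the shapes and values asserted above, calc_coordinates offsets each robot's source position by hypotenuse along every sensor angle. The sketch below only reproduces this test's expectations and is not the project's actual implementation; the other examples pass differently shaped arguments, so the real utils.calc_coordinates likely broadcasts more generally.

import numpy as np

def calc_coordinates_sketch(angles, hypotenuse, source):
    # angles: (n_sensors, n_robots); source: (2, n_robots) with rows (x, y).
    dx = hypotenuse * np.cos(angles)      # x offsets, shape (n_sensors, n_robots)
    dy = hypotenuse * np.sin(angles)      # y offsets
    offsets = np.stack((dx, dy), axis=1)  # shape (n_sensors, 2, n_robots)
    return source + offsets               # e.g. result[2, 0, 0] == 1 + cos(0) * 1 == 2
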
Example #3
    def __translate(self, speeds):
        # Move every robot along its heading by the given speed, then shift
        # the robot body template to the new (rounded) positions.
        self.robot_positions = utils.calc_coordinates(self.robot_angles,
                                                      speeds,
                                                      self.robot_positions)
        self.robot_bodies = np.add(self.robot.body,
                                   np.rint(self.robot_positions)).astype(int)
        return self.robot_positions
Example #4
import numpy as np
import utils
import viz
import math
from array2gif import write_gif

map_size = 250
border_size = 2
map = np.ones((map_size, map_size)).astype(int)  # 1 = free space, 0 = obstacle
map[100:151, 100:151] = 0                        # square obstacle in the middle
map[0:border_size, :] = 0                        # walls along the four borders
map[:, 0:border_size] = 0
map[map_size - border_size:, :] = 0
map[:, map_size - border_size:] = 0

map_object = utils.Map(map, np.array([[[40, 40]]]), np.array([[[210, 210]]]))

robot_body = utils.build_robot_body(10) + [[[65, 65]]]  # robot body centred at (65, 65)

sensor_angles = np.array(
    [[math.radians(-60), math.radians(-30), math.radians(0), math.radians(30), math.radians(60)]]).T

sensor_start_points = utils.calc_coordinates(sensor_angles, np.array([[10]]), [[[65, 65]]])  # (1, ?, 2)
sensor_end_points = utils.calc_coordinates(sensor_angles, np.array([[30]]), [[[65, 65]]])  # (1, ?, 2)
sensor_lines = utils.calc_line_coordinates(sensor_start_points, sensor_end_points)

img = viz.get_image_zxy(robot_body, sensor_lines, map_object)

write_gif(img, "map.gif", fps=5)

print(str(img))
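
viz.get_image_zxy is not shown in these examples. Its name suggests a channel-first (z, x, y) RGB frame, which matches the rank-3 layout array2gif works with. The function below is a purely hypothetical stand-in written under that assumption, just to illustrate how the map, robot body and sensor lines could be rendered into such a frame.

import numpy as np

def get_image_zxy_sketch(robot_body, sensor_lines, grid):
    # Hypothetical renderer: obstacles black, free space white,
    # robot body red, sensor rays green; output shape (3, x, y).
    frame = np.zeros((3,) + grid.shape, dtype=np.uint8)
    frame[:] = grid * 255                                   # 1 = free (white), 0 = wall (black)
    for x, y in np.rint(robot_body).astype(int).reshape(-1, 2):
        frame[:, x, y] = (255, 0, 0)                        # robot body in red
    for x, y in np.rint(sensor_lines).astype(int).reshape(-1, 2):
        frame[:, x, y] = (0, 255, 0)                        # sensor rays in green
    return frame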