Code example #1
    def find_best_k_views(self, eye_pos, all_dist, all_pos, avoid_block=False):
        least_order = (np.argsort(all_dist))
        top_k = []
        num_to_test = self.k * 2
        curr_num = 0
        all_pos = np.array(all_pos)

        if not avoid_block:
            return least_order[:self.k]

        while len(top_k) < self.k:
            curr_order = least_order[curr_num: curr_num + num_to_test]
            curr_pos = all_pos[curr_order]
            print("Curr num", curr_num, "top k", len(top_k), self.k)
            if len(curr_pos) <= p.MAX_RAY_INTERSECTION_BATCH_SIZE:
                collisions = list(p.rayTestBatch([eye_pos] * len(curr_pos), curr_pos))
            else:
                collisions = []
                curr_i = 0
                while (curr_i < len(curr_pos)):
                    curr_n = min(len(curr_pos), curr_i + p.MAX_RAY_INTERSECTION_BATCH_SIZE - 1)
                    collisions = collisions + list(p.rayTestBatch([eye_pos] * (curr_n - curr_i), curr_pos[curr_i: curr_n]))
                    curr_i = curr_n
            has_collision = [c[0] > 0 for c in collisions]
            ## (hzyjerry): ray_casting-based view selection occasionally gives unstable behaviour. Will keep watching on this
            for i, x in enumerate(curr_order):
                if not has_collision[i]:
                    top_k.append(x)
            curr_num += num_to_test  # advance to the next slice of candidates; otherwise the loop never terminates
            if curr_num >= len(least_order):
                break
        return top_k[:self.k]
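The chunking above keeps each call below p.MAX_RAY_INTERSECTION_BATCH_SIZE. The same pattern can be factored into a small standalone helper; a minimal sketch (the name ray_test_chunked is mine, and the '- 1' mirrors the workaround used in the code above):

import pybullet as p

def ray_test_chunked(ray_froms, ray_tos):
    # Split an arbitrarily large ray batch into chunks pybullet will accept.
    limit = p.MAX_RAY_INTERSECTION_BATCH_SIZE - 1
    results = []
    for i in range(0, len(ray_froms), limit):
        results.extend(p.rayTestBatch(ray_froms[i:i + limit],
                                      ray_tos[i:i + limit]))
    return results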
Code example #2
 def find_best_k_views(self, eye_pos, all_dist, all_pos):
     least_order = (np.argsort(all_dist))
     if len(all_pos) <= p.MAX_RAY_INTERSECTION_BATCH_SIZE:
         collisions = list(p.rayTestBatch([eye_pos] * len(all_pos), all_pos))
     else:
         collisions = []
         curr_i = 0
         while (curr_i < len(all_pos)):
             curr_n = min(len(all_pos), curr_i + p.MAX_RAY_INTERSECTION_BATCH_SIZE - 1)
             collisions = collisions + list(p.rayTestBatch([eye_pos] * (curr_n - curr_i), all_pos[curr_i: curr_n]))
             curr_i = curr_n
     collisions = [c[0] for c in collisions]
     top_k = []
     for i in range(len(least_order)):
         if len(top_k) >= self.k:
             break
         ## (hzyjerry): disabling ray_casting-based view selection because it gives unstable behaviour right now.
         top_k.append(least_order[i])
     if len(top_k) < self.k:
         for o in least_order:
             if o not in top_k:
                 top_k.append(o)
             if len(top_k) >= self.k:
                 break
     return top_k
Code example #3
    def get_obs(self, env):
        """
        Get current LiDAR sensor reading and occupancy grid (optional)

        :return: LiDAR sensor reading and local occupancy grid, normalized to [0.0, 1.0]
        """
        laser_angular_half_range = self.laser_angular_range / 2.0
        if self.laser_link_name not in env.robots[0].parts:
            raise Exception('Trying to simulate LiDAR sensor, but laser_link_name cannot be found in the robot URDF file. Please add a link named laser_link_name at the intended laser pose. Feel free to check out assets/models/turtlebot/turtlebot.urdf and examples/configs/turtlebot_p2p_nav.yaml for examples.')
        laser_pose = env.robots[0].parts[self.laser_link_name].get_pose()
        angle = np.arange(
            -laser_angular_half_range / 180 * np.pi,
            laser_angular_half_range / 180 * np.pi,
            self.laser_angular_range / 180.0 * np.pi / self.n_horizontal_rays)
        unit_vector_local = np.array(
            [[np.cos(ang), np.sin(ang), 0.0] for ang in angle])
        transform_matrix = quat2mat(
            [laser_pose[6], laser_pose[3], laser_pose[4], laser_pose[5]])  # [x, y, z, w]
        unit_vector_world = transform_matrix.dot(unit_vector_local.T).T

        start_pose = np.tile(laser_pose[:3], (self.n_horizontal_rays, 1))
        start_pose += unit_vector_world * self.min_laser_dist
        end_pose = laser_pose[:3] + unit_vector_world * self.laser_linear_range
        results = p.rayTestBatch(start_pose, end_pose, 6)  # numThreads = 6

        # hit fraction = [0.0, 1.0] of self.laser_linear_range
        hit_fraction = np.array([item[2] for item in results])
        hit_fraction = self.noise_model.add_noise(hit_fraction)
        scan = np.expand_dims(hit_fraction, 1)

        state = {}
        state['scan'] = scan
        if 'occupancy_grid' in self.modalities:
            state['occupancy_grid'] = self.get_local_occupancy_grid(scan)
        return state
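quat2mat here presumably comes from transforms3d, which expects a [w, x, y, z] quaternion, hence the reordering of laser_pose. If only pybullet is available, p.getMatrixFromQuaternion accepts the [x, y, z, w] order directly; a minimal equivalent sketch:

import numpy as np
import pybullet as p

p.connect(p.DIRECT)
quat_xyzw = [0.0, 0.0, 0.0, 1.0]  # identity rotation, pybullet's [x, y, z, w] order
transform_matrix = np.array(p.getMatrixFromQuaternion(quat_xyzw)).reshape(3, 3)

unit_vector_local = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
unit_vector_world = transform_matrix.dot(unit_vector_local.T).T
print(unit_vector_world)
p.disconnect()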
Code example #4
def pybullet_path_checking(start, end):
    '''

    :param start: starting coordinate in real world
    :param end: ending coordinate in real world
    :return: True: no collision with object in path
             False: collide with object in path
    '''
    pos = end
    dz = 2
    dense_of_ray = 1  # describes how dense the rays used for collision detection are
    _, _, _, hit_position, _ = p.rayTest(
        start, [start[0], start[1], start[2] - dz])[0]
    height = start[2] - hit_position[2]
    start_pos_array = [[
        start[0], start[1], hit_position[2] + i * height / dense_of_ray
    ] for i in range(1, dense_of_ray + 1)]

    _, _, _, end_hit_position, _ = p.rayTest(pos,
                                             [pos[0], pos[1], pos[2] - dz])[0]
    end_height = pos[2] - end_hit_position[2]
    end_pos_array = [[
        pos[0], pos[1], end_hit_position[2] + i * end_height / dense_of_ray
    ] for i in range(1, dense_of_ray + 1)]

    result = p.rayTestBatch(start_pos_array, end_pos_array)
    for r in result:
        if r[0] == -1:
            # no collision: objectUniqueId is -1 when the ray hits nothing
            continue
        else:
            return False

    return True
Code example #5
def rayTest(robot_id: int,
            ray_length: float,
            ray_num: int = 5,
            physicsClientId: int = 0):
    """
    :param robot_id:   id of the robot body
    :param ray_length: length of each laser ray
    :param ray_num:    number of rays (note: the emitters are spread evenly between -pi/2 and pi/2)
    """
    basePos, baseOrientation = p.getBasePositionAndOrientation(
        robot_id, physicsClientId=physicsClientId)
    matrix = p.getMatrixFromQuaternion(baseOrientation,
                                       physicsClientId=physicsClientId)
    basePos = np.array(basePos)
    matrix = np.array(matrix).reshape([3, 3])

    # Vectors from the robot's center to the laser emission points, expressed in the robot's local frame.
    # The logic: compute unit direction vectors in the local frame first, then transform them to the world frame.
    unitRayVecs = np.array([[cos(alpha), sin(alpha), 0]
                            for alpha in np.linspace(-np.pi / 2., np.pi /
                                                     2., ray_num)])
    unitRayVecs = unitRayVecs.dot(matrix.T)
    # Broadcasting gives the world-frame coordinates of all laser emission points.
    rayBegins = basePos + BASE_RADIUS * unitRayVecs
    rayTos = rayBegins + ray_length * unitRayVecs
    results = p.rayTestBatch(rayBegins,
                             rayTos,
                             physicsClientId=physicsClientId)
    return rayBegins, rayTos, results
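A quick standalone trial of this helper might look like the sketch below, assuming rayTest and its numpy/math imports live in the same module. BASE_RADIUS is a module-level constant in the original, so an arbitrary placeholder value is used here; plane.urdf, r2d2.urdf and cube.urdf ship with pybullet_data:

import pybullet as p
import pybullet_data

BASE_RADIUS = 0.3  # placeholder; the original module defines this elsewhere

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
robot = p.loadURDF("r2d2.urdf", [0, 0, 0.5])
p.loadURDF("cube.urdf", [2, 0, 0.5])  # give the rays something to hit

ray_begins, ray_tos, results = rayTest(robot, ray_length=5.0, ray_num=9)
for hit in results:
    print(hit[0], hit[2])  # hit body id (-1 means miss) and hit fraction
p.disconnect()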
Code example #6
    def get_scan(self):
        """
        :return: LiDAR sensor reading, normalized to [0.0, 1.0]
        """
        laser_angular_half_range = self.laser_angular_range / 2.0
        if self.laser_link_name not in self.robots[0].parts:
            raise Exception('Trying to simulate LiDAR sensor, but laser_link_name cannot be found in the robot URDF file. Please add a link named laser_link_name at the intended laser pose. Feel free to check out assets/models/turtlebot/turtlebot.urdf and examples/configs/turtlebot_p2p_nav.yaml for examples.')
        laser_pose = self.robots[0].parts[self.laser_link_name].get_pose()
        angle = np.arange(-laser_angular_half_range / 180 * np.pi,
                          laser_angular_half_range / 180 * np.pi,
                          self.laser_angular_range / 180.0 * np.pi / self.n_horizontal_rays)
        unit_vector_local = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angle])
        transform_matrix = quat2mat([laser_pose[6], laser_pose[3], laser_pose[4], laser_pose[5]])  # [x, y, z, w]
        unit_vector_world = transform_matrix.dot(unit_vector_local.T).T

        start_pose = np.tile(laser_pose[:3], (self.n_horizontal_rays, 1))
        start_pose += unit_vector_world * self.min_laser_dist
        end_pose = laser_pose[:3] + unit_vector_world * self.laser_linear_range
        results = p.rayTestBatch(start_pose, end_pose, 6)  # numThreads = 6

        hit_fraction = np.array([item[2] for item in results])  # hit fraction = [0.0, 1.0] of self.laser_linear_range
        hit_fraction = self.add_naive_noise_to_sensor(hit_fraction, self.scan_noise_rate)
        scan = np.expand_dims(hit_fraction, 1)

        xyz = hit_fraction[:, np.newaxis] * unit_vector_local * 10
        xyz = xyz[np.equal(np.isnan(xyz), False)]
        xyz = xyz.reshape(xyz.shape[0] // 3, -1)
        return xyz  # note: returns the point cloud rather than the normalized scan built above
Code example #7
    def _laserScan(self):
        """
        INTERNAL METHOD, a loop that simulate the laser and update the distance
        value of each laser
        """
        lastLidarTime = time.time()

        while not self._module_termination:
            nowLidarTime = time.time()

            if (nowLidarTime - lastLidarTime > 1 / LASER_FRAMERATE):
                results = pybullet.rayTestBatch(
                    self.ray_from,
                    self.ray_to,
                    parentObjectUniqueId=self.robot_model,
                    parentLinkIndex=self.laser_id,
                    physicsClientId=self.physics_client)

                for i in range(NUM_RAY * len(ANGLE_LIST_POSITION)):
                    hitObjectUid = results[i][0]
                    hitFraction = results[i][2]
                    hitPosition = results[i][3]
                    self.laser_value[i] = hitFraction * RAY_LENGTH

                    if self.display:
                        if not self.ray_ids:
                            self._createDebugLine()

                        if (hitFraction == 1.):
                            pybullet.addUserDebugLine(
                                self.ray_from[i],
                                self.ray_to[i],
                                RAY_MISS_COLOR,
                                replaceItemUniqueId=self.ray_ids[i],
                                parentObjectUniqueId=self.robot_model,
                                parentLinkIndex=self.laser_id,
                                physicsClientId=self.physics_client)
                        else:  # pragma: no cover
                            localHitTo = [
                                self.ray_from[i][0] + hitFraction *
                                (self.ray_to[i][0] - self.ray_from[i][0]),
                                self.ray_from[i][1] + hitFraction *
                                (self.ray_to[i][1] - self.ray_from[i][1]),
                                self.ray_from[i][2] + hitFraction *
                                (self.ray_to[i][2] - self.ray_from[i][2])
                            ]
                            pybullet.addUserDebugLine(
                                self.ray_from[i],
                                localHitTo,
                                RAY_HIT_COLOR,
                                replaceItemUniqueId=self.ray_ids[i],
                                parentObjectUniqueId=self.robot_model,
                                parentLinkIndex=self.laser_id,
                                physicsClientId=self.physics_client)

                    else:
                        if self.ray_ids:
                            self._resetDebugLine()

                lastLidarTime = nowLidarTime
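Several of these examples reconstruct the hit point component by component as ray_from + hitFraction * (ray_to - ray_from); with numpy the same interpolation can be done for all rays at once. A small self-contained sketch with made-up data:

import numpy as np

# Stand-ins for the ray endpoints and the hit fractions returned by p.rayTestBatch.
ray_from = np.zeros((3, 3))
ray_to = np.array([[1.0, 0.0, 0.0],
                   [0.0, 2.0, 0.0],
                   [0.0, 0.0, 4.0]])
fractions = np.array([1.0, 0.5, 0.25])  # 1.0 means the ray reached ray_to without hitting anything

hit_points = ray_from + fractions[:, None] * (ray_to - ray_from)
print(hit_points)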
Code example #8
def single_test(urdf_file, isDIRECT=True):
    p.connect(p.DIRECT if isDIRECT else p.GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    p.loadURDF(urdf_file, [3, 3, 1])

    numRays = 1024
    rayLen = 13

    info_lst = []
    rayFrom_lst = []
    rayTo_lst = []

    for i in range(numRays):
        rayFrom = [0, 0, 1]
        rayTo = [
            rayLen * math.sin(2. * math.pi * float(i) / numRays),
            rayLen * math.cos(2. * math.pi * float(i) / numRays), 1
        ]

        info = p.rayTest(rayFrom, rayTo)
        info_lst.append(info[0])

        p.addUserDebugLine(rayFrom, rayTo, [1, 0, 0])
        rayFrom_lst.append(rayFrom)
        rayTo_lst.append(rayTo)

    info_lst_batch = p.rayTestBatch(rayFrom_lst, rayTo_lst)

    collide_predicate = lambda info: info[0] != -1
    info_filtered = list(filter(collide_predicate, info_lst))
    info_batch_filtered = list(filter(collide_predicate, info_lst_batch))

    p.disconnect()

    return info_filtered, info_batch_filtered
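As a quick sanity check that the per-ray p.rayTest loop and the single p.rayTestBatch call agree, the function above can be run directly (assuming the usual module-level imports of pybullet as p, pybullet_data and math; the comparison below is mine):

hits_single, hits_batch = single_test("r2d2.urdf")
print(len(hits_single), len(hits_batch))  # the two counts are expected to match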
Code example #9
    def compute_points_from_camera(self, camera, sample_size, max_range, sigma,
                                   sigma_pure, dt):
        #p.removeAllUserDebugItems()
        ray_from = []
        ray_to = []

        rand_x = np.random.normal(camera.center[0], sigma, sample_size)
        rand_x = np.insert(
            rand_x, 0,
            camera.center[0])  # insert center for center of attention

        rand_y = np.random.normal(camera.center[1], sigma, sample_size)
        rand_y = np.insert(
            rand_y, 0,
            camera.center[1])  # insert center for center of attention

        target_matrix = np.zeros((4, sample_size + 1))
        target_matrix[0, :] = -0.5
        target_matrix[1, :] = 0.5
        target_matrix[2, :] = rand_x
        target_matrix[3, :] = -rand_y

        ortho = camera.trans_mat.dot(target_matrix)
        rayTo = (ortho +
                 (camera.forward - camera.position)[:, np.newaxis]) * max_range

        for s in range(sample_size):
            target = rayTo[:, s]
            ray_from.append(camera.position)
            ray_to.append(target)

        hits = p.rayTestBatch(ray_from, ray_to)

        point_clouds = {}

        element_center_attention = hits[0][0]  # get object id
        center_point = hits[0][3]  # get hit position
        max_radius = 1.0

        # name, pos, intensity
        selected_hits = self.compute_intensity(hits[0],
                                               hits[1:],
                                               max_radius,
                                               gamma=0.5,
                                               penalty=1.0,
                                               dt=dt)

        for h in selected_hits:
            #p.addUserDebugLine(camera.position, [h[1], h[2], h[3]], rayMissColor)
            if h[0] not in point_clouds.keys():
                point_clouds[h[0]] = []
            point_clouds[h[0]].append(h[1:])

        return point_clouds, element_center_attention, center_point
Code example #10
File: debug_github.py Project: HiroIshida/snippets
def test_smallsphere(pos_):
    pos = np.array(pos_)
    ss = p.loadURDF("r2d2.urdf")
    p.resetBasePositionAndOrientation(ss, pos, [0, 0, 0, 1])
    margin = np.array([0.5, 0.0, 0.0])
    p_start = pos - margin
    p_end = pos + margin
    p.addUserDebugLine(p_start, p_end, [1, 0, 0])
    input("press")
    result = p.rayTestBatch([p_start], [p_end])
    return result
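The function above does not connect to a physics server or set a search path itself, so a hedged way to try it (assuming numpy is imported as np in that module) would be:

import pybullet as p
import pybullet_data

p.connect(p.GUI)  # GUI so the debug line is visible; DIRECT also works
p.setAdditionalSearchPath(pybullet_data.getDataPath())
print(test_smallsphere([0.0, 0.0, 0.5]))
p.disconnect()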
Code example #11
	def read_laser_scan(self):
		numRays=10

		# rayFrom,rayTo = self.init_laserscanner()
		numThreads=0
		results = p.rayTestBatch(self.rayFrom,self.rayTo,numThreads, parentObjectUniqueId=self.robot_uid, parentLinkIndex=self.hokuyo_joint)
		result = []
		trans,rot = p.getBasePositionAndOrientation(self.robot_uid)
		for i in range (numRays):

			laser = np.linalg.norm(np.array([trans[0],trans[1]]) - np.array([results[i][3][0],results[i][3][1]]))
			result.append(laser)
		return np.array([result])
Code example #12
def batch_ray_collision(rays, threads=1):
    assert 1 <= threads <= p.MAX_RAY_INTERSECTION_BATCH_SIZE
    if not rays:
        return []
    step_simulation() # Needed for some reason
    ray_starts = [start for start, _ in rays]
    ray_ends = [end for _, end in rays]
    return [RayResult(*tup) for tup in p.rayTestBatch(
        ray_starts, ray_ends,
        numThreads=threads,
        #parentObjectUniqueId=
        #parentLinkIndex=
        physicsClientId=CLIENT)]
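RayResult, CLIENT and step_simulation come from elsewhere in that code base. A minimal stand-in for RayResult, consistent with the five-element tuples p.rayTestBatch returns, could look like this (the field names are my guess, not necessarily the library's):

from collections import namedtuple

# One entry per ray: body id (-1 on miss), link index, hit fraction, hit position, hit normal.
RayResult = namedtuple('RayResult',
                       ['objectUniqueId', 'linkIndex',
                        'hit_fraction', 'hit_position', 'hit_normal'])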
Code example #13
File: features.py Project: mdheller/costar_plan
    def compute(self, world, state):
        import pybullet as pb
        object_translation_rotation = []
        # camera.py ImageData namedtuple
        camera = world.cameras[0]
        image_data = camera.capture()
        image_data_arrays = [np.array(value) for value in image_data] + \
            [camera.matrix, camera.projection_matrix]
        # 'camera_view_matrix' namedtuple index is 4
        # TODO(ahundt) ensure camera matrix translation component is from world origin to camera origin
        # camera ray is from the origin of the camera
        camera_transform_array = np.transpose(image_data[4]).reshape((4, 4))
        # use the more conveniently formatted transform for writing out
        image_data_arrays[4] = camera_transform_array
        camera_translation = camera_transform_array[0:3, 3].tolist()
        # print("TEST IMAGEDATA named tuple img matrix: \n", )
        # print("TEST IMAGEDATA named tuple img matrix translation: ", camera_translation)
        camera_ray_from = [camera_translation] * len(
            world.id_by_object.items())
        # camera_ray_to is the center of each object
        camera_ray_to = []
        for name, oid in world.id_by_object.items():
            # print("oid type:", str(type(oid)))
            # print("actor type:", str(type(world.actors[oid])))
            obj = world.actors[oid].getState()
            p = obj.T.p
            q = obj.T.M.GetQuaternion()
            one_t_r = np.array([p[0], p[1], p[2], q[0], q[1], q[2], q[3]],
                               dtype=np.float32)
            object_translation_rotation.append(one_t_r)
            camera_ray_to.append(list(obj.T.p))

        # print("lengths: ", len(camera_ray_from), len(camera_ray_to))
        object_surface_points = []
        # TODO(ahundt) allow multiple simulations to run
        raylist = pb.rayTestBatch(camera_ray_from, camera_ray_to)
        for i, (uid, linkidx, hitfrac, hitpos,
                hitnormal) in enumerate(raylist):
            if uid == -1:
                # if the object wasn't hit, use its origin
                name, oid = list(world.id_by_object.items())[i]
                obj = world.actors[oid].getState()
                object_surface_points += [obj.T.p]
            else:
                object_surface_points += [hitpos]

        return [
            np.array(object_translation_rotation),
            np.array(state.arm),
            np.array(state.gripper)
        ] + image_data_arrays[1:] + [np.array(object_surface_points)]
Code example #14
    def draw_rays(self):
        '''
        draws rays to be visualized in sim
        '''
        ray_to = np.zeros([self.ray_count, 3])
        ray_ids = np.zeros(self.ray_count)
        ray_angles = np.mod(self.yaw + self.rays, 2 * np.pi)
        for ii, th in enumerate(ray_angles):
            ray_to[ii] = self.x+ np.array([self.ray_len*np.cos(th),\
                                self.ray_len*np.sin(th),0])

            if self.debug:
                p.addUserDebugLine(self.x, ray_to[ii], self.ray_miss_color)
        return p.rayTestBatch(self.ray_from, ray_to)
Code example #15
    def getlocalMapObservation(self):
        ### raycast around the area of the agent
        """
            For now this includes the agent in the center of the computation
        """
        import numpy as np

        pos = p.getBasePositionAndOrientation(self._agent)[0]
        ### number of samples
        size = 16
        ### width of box
        dimensions = 8.0
        toRays = []
        for i in range(0, size):
            for j in range(0, size):
                toRays.append([(1.0 / (size * 1.0)) * i * dimensions,
                               (1.0 / (size * 1.0)) * j * dimensions, 0])
        assert (len(toRays) == (size * size))
        toRays = np.array(toRays)
        ### Adjust to put agent in middle of map
        toRays = toRays + pos - np.array(
            [dimensions / 2.0, dimensions / 2.0, 0])
        # print ("toRays:", toRays )

        fromRays = toRays + np.array([0, 0, 5])
        rayResults = p.rayTestBatch(fromRays, toRays)
        intersections = [ray[0] for ray in rayResults]
        # print (intersections)
        ### fix intersections that could be goal

        for ray in range(len(intersections)):
            if (intersections[ray] in [self._target, self._agent]):
                # print ("bad index: ", ray)
                intersections[ray] = -1

        # bad_indecies = np.where(intersections == self._target)[0]
        # print ("bad_indecies: ", bad_indecies)
        # bad_indecies = np.where(intersections == int(self._agent))
        # print ("bad_indecies: ", bad_indecies)
        # print ("self._agent: ", self._agent)
        """
        if ( len(bad_indecies) > 0):
            # print ("self._target: ", self._target)
            intersections[bad_indecies] = -1
        """
        # intersections_ = np.reshape(intersections, (size, size))
        # print ("intersections", intersections_)
        intersections = np.array(np.greater(intersections, 0), dtype="int")
        return intersections
Code example #16
def get_range_reading(theta):
    ranges = []
    rayFroms = [lidar_pos for i in range(num_rays)]
    rayTos = get_batch_ray_to(theta)
    rayTestInfo = p.rayTestBatch(rayFroms, rayTos)
    for i in range(len(rayTestInfo)):
        # compute hit distance
        d = float(rayTestInfo[i][2]) * lidar_range
        # if no hit
        if rayTestInfo[i][0] == -1:
            d = lidar_range
        ranges.append(1 - d / lidar_range)
    # Draw debug rays
    for i in range(0, len(rayTos), 5):
        p.addUserDebugLine(lidar_pos, rayTos[i], [1, 0, 0], lifeTime=0.6)
    return ranges
Code example #17
    def get_lidar_scan(self):
        '''
        Obtain lidar scan values for current state.

        Returns:
            list: Scan values for range and resolution specified in
                params dict.
        '''
        if self.valid_buffer_scan:
            return self.last_scan
        scan_front = None
        scan_rear = None

        # Get pose of lidar
        states = pb.getLinkState(self.robotId, self.lidarLinkId1, False, False,
                                 self.clientId)
        lidar_pos, lidar_ori = states[4:6]
        lidar_pos = np.array(lidar_pos)
        R = np.array(pb.getMatrixFromQuaternion(lidar_ori))
        R = np.reshape(R, (3, 3))
        scan_l1 = R.dot(self.rays[0]).T + lidar_pos
        scan_h1 = R.dot(self.rays[1]).T + lidar_pos

        # Get pose of lidar
        states = pb.getLinkState(self.robotId, self.lidarLinkId2, False, False,
                                 self.clientId)
        lidar_pos, lidar_ori = states[4:6]
        lidar_pos = np.array(lidar_pos)
        R = np.array(pb.getMatrixFromQuaternion(lidar_ori))
        R = np.reshape(R, (3, 3))
        scan_l2 = R.dot(self.rays[0]).T + lidar_pos
        scan_h2 = R.dot(self.rays[1]).T + lidar_pos

        scan_l = np.concatenate((scan_l1, scan_l2))
        scan_h = np.concatenate((scan_h1, scan_h2))

        # Pass the client id by keyword: the third positional argument of rayTestBatch is numThreads.
        scan_r = pb.rayTestBatch(scan_l.tolist(), scan_h.tolist(),
                                 physicsClientId=self.clientId)

        scan = [x[2] * self.p["sensors"]["lidar"]["range"] for x in scan_r]
        scan_front = scan[:len(scan_l1)]
        scan_rear = scan[len(scan_l1):]
        self.last_scan = [scan_front, scan_rear]
        self.valid_buffer_scan = True
        return [scan_front, scan_rear]
Code example #18
File: spatial_engine.py Project: jadnohra/daisy
    def cast_rays_spherical(self, origin, azimuth_from, azimuth_to,
                            azimuth_res, polar_from, polar_to, polar_res,
                            max_ray_len):
        def interp(frm, to, i, res):
            return frm + (to - frm) * (i / float(max(res - 1, 1)))

        ray_count = polar_res * azimuth_res
        to_pts = [None] * ray_count

        ray_i = 0
        for pol_i in range(polar_res):
            el = interp(polar_from, polar_to, pol_i, polar_res)
            rcos_theta = max_ray_len * np.cos(el)
            z = max_ray_len * np.sin(el)
            for azi_i in range(azimuth_res):
                az = interp(azimuth_from, azimuth_to, azi_i, azimuth_res)
                x = rcos_theta * np.cos(az)
                y = rcos_theta * np.sin(az)
                to_pts[ray_i] = (x, y, z)
                ray_i = ray_i + 1

        results = []
        tested_ray_count = 0
        while ray_count - tested_ray_count > 0:
            # the '-1' is due to seemingly a bug in pybullet, which seems to return
            # one result less than the batch size, when using the full batch size
            batch_size = min(ray_count - tested_ray_count,
                             bullet.MAX_RAY_INTERSECTION_BATCH_SIZE - 1)
            batch_results = bullet.rayTestBatch(
                [origin] * batch_size,
                to_pts[tested_ray_count:tested_ray_count + batch_size])
            results.extend(batch_results)
            tested_ray_count = tested_ray_count + batch_size
        hits = [None] * ray_count
        hit_i = 0
        for i in range(ray_count):
            hit_uid = results[i][0]
            if hit_uid >= 0:
                hit_pos = results[i][3]
                hits[hit_i] = (hit_uid, hit_pos)
                hit_i = hit_i + 1

        return hits[:hit_i]
Code example #19
    def observe(self) -> NDArray[(Any, ), np.float]:
        results = p.rayTestBatch(self._from,
                                 self._to,
                                 0,
                                 parentObjectUniqueId=self.body_id,
                                 parentLinkIndex=self.joint_index)
        hit_fractions = np.array(results, dtype=object)[:,
                                                        2].astype(dtype=float)
        ranges = self._config.range * hit_fractions + self._config.min_range
        noise = np.random.uniform(1.0 - self._config.accuracy,
                                  1.0 + self._config.accuracy,
                                  size=ranges.shape)
        scan = np.clip(ranges * noise,
                       a_min=self._config.min_range,
                       a_max=self._config.range)

        if self._config.debug:
            self._display_rays(hit_fractions, scan)

        return scan
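self._config here carries range, min_range, accuracy and debug; the actual class is not shown, but a hypothetical stand-in that would satisfy this method could be:

from dataclasses import dataclass

@dataclass
class LidarConfig:  # hypothetical stand-in; the real config class is defined elsewhere
    range: float = 5.0
    min_range: float = 0.05
    accuracy: float = 0.01
    debug: bool = False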
Code example #20
    def detectLaserCollisions(self):
        '''Detect collision of laser rays and render it on GUI'''

        for body in self.bodies:
            if body.hasLaser:
                for i in range(len(body.lasers)):
                    (pos, _) = p.getBasePositionAndOrientation(body.bodyId)
                    rayFrom = body.lasers[i].laserRayBatchFrom
                    rayTo = body.lasers[i].laserRayBatchTo

                    newFrom = []
                    newTo = []

                    for k in range(len(rayFrom)):
                        newFrom.append([
                            rayFrom[k][0] + pos[0], rayFrom[k][1] + pos[1],
                            rayFrom[k][2] + pos[2]
                        ])
                        newTo.append([
                            rayTo[k][0] + pos[0], rayTo[k][1] + pos[1],
                            rayTo[k][2] + pos[2]
                        ])

                    results = p.rayTestBatch(newFrom, newTo)

                    for j in range(len(rayFrom)):
                        hitObjectUid = results[j][0]

                        if (hitObjectUid < 0):
                            p.addUserDebugLine(newFrom[j],
                                               newTo[j],
                                               lineColorRGB=[1, 0, 0],
                                               lifeTime=0.1)
                        else:
                            hitPosition = results[j][3]
                            p.addUserDebugLine(newFrom[j],
                                               hitPosition,
                                               lineColorRGB=[0, 1, 0],
                                               lifeTime=0.1)
Code example #21
def getRangeReading(theta):
    """
    Simulate laser range readings
    :param theta: current angle
    :return:
    """

    ranges = []
    rayFroms = [lidar_pos for i in range(num_rays)]
    rayTos = getBatchRayTo(theta)
    rayTestInfo = p.rayTestBatch(rayFroms, rayTos)
    for i in range(len(rayTestInfo)):
        # compute hit distance
        d = float(rayTestInfo[i][2]) * lidar_range
        # if no hit
        if rayTestInfo[i][0] == -1:
            d = lidar_range
        ranges.append(d)
    # Draw debug rays
    # for i in range(len(rayTos)):
    #   p.addUserDebugLine(lidar_pos, rayTos[i], [1,0,0], lifeTime=0.1)
    return ranges
Code example #22
    def look(self, dist, viewAngle, maxGap):
        """looks at the environment
        by performing a ray test batch centered at the position of the character in the direction its facing
        input:
            dist: maximum sight distance 
            viewAngle: the field of view (in degree)
            maxGap: the maximum arclength between two adjacent raycasts
        output:
            hitObjList: a list of nested lists that contain the object IDs that were hit and their positions
        """
        viewAngle = math.radians(viewAngle)
        center = self.pos
        z_rot = self.yaw
        numRayCasts = math.ceil((dist) * (viewAngle) / (maxGap + 0.0)) + 1
        startPosList = [center] * numRayCasts
        endPosList = []
        viewAngle = maxGap * (numRayCasts - 1) / (
            dist + 0.0
        )  # because numRayCasts is rounded up, original view angle has likely changed
        angle = z_rot - viewAngle / 2
        for i in range(numRayCasts):
            x = center[0] + math.cos(angle) * dist
            y = center[1] + math.sin(angle) * dist
            z = maxGap / 2.0
            endPosList.append([x, y, z])
            angle += viewAngle / (numRayCasts - 1)
        rayList = p.rayTestBatch(startPosList,
                                 endPosList,
                                 collisionFilterMask=mv.RAY_MASK)

        hitObjList = []
        for rayOutput in rayList:
            if rayOutput[0] != -1 and rayOutput[0] not in hitObjList:
                hitID = rayOutput[0]
                objPos = hsm.objIDToObject[hitID].pos
                hitObjList.append([hitID, objPos])
        return hitObjList
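The collisionFilterMask argument above restricts which collision groups the rays can hit (mv.RAY_MASK is defined elsewhere in that project). A minimal illustration of the mechanism, with arbitrary group/mask values and cube.urdf from pybullet_data; the expected results in the comments are my reading of the pybullet docs, not something verified here:

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
wall = p.loadURDF("cube.urdf", [1, 0, 0.5])
# Put the wall in collision group 2 (mask 2) so a ray mask can filter it out.
p.setCollisionFilterGroupMask(wall, -1, 2, 2)

ray_from = [[-1.0, 0.0, 0.5]]
ray_to = [[3.0, 0.0, 0.5]]
hit_default = p.rayTestBatch(ray_from, ray_to)                        # default mask: should hit the wall
hit_masked = p.rayTestBatch(ray_from, ray_to, collisionFilterMask=1)  # 1 & 2 == 0: should report a miss (-1)
print(hit_default[0][0], hit_masked[0][0])
p.disconnect()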
Code example #23
    def get_lidar_scan(self, extra_outputs=False):
        '''
        Obtain lidar scan values for current state.

        Returns:
            list: Scan values for range and resolution specified in
                params dict.
        '''
        if self.valid_buffer_scan:
            if extra_outputs:
                return self.last_scan, self.last_extra_scan_output
            return self.last_scan

        scan_front = None
        scan_rear = None

        scan_l1, scan_h1 = self.get_lidar_rays(self.lidarLinkId1)
        scan_l2, scan_h2 = self.get_lidar_rays(self.lidarLinkId2)

        scan_l = np.concatenate((scan_l1, scan_l2))
        scan_h = np.concatenate((scan_h1, scan_h2))

        t0 = time.time()
        # Pass the client id by keyword: the third positional argument of rayTestBatch is numThreads.
        scan_r = pb.rayTestBatch(scan_l.tolist(), scan_h.tolist(),
                                 physicsClientId=self.clientId)
        #print (time.time()-t0)
        scan = [x[2] * self.p["sensors"]["lidar"]["range"] for x in scan_r]
        scan_front = scan[:len(scan_l1)]
        scan_rear = scan[len(scan_l1):]
        extra_scan_output = [scan_l, scan_h, len(scan_l1)]
        self.last_scan = [scan_front, scan_rear]
        self.last_extra_scan_output = extra_scan_output
        self.valid_buffer_scan = True
        if extra_outputs:
            return [scan_front, scan_rear], extra_scan_output

        return [scan_front, scan_rear]
Code example #24
 def raycast(self,
             offset=torch.zeros(3),
             directions=torch.tensor([1., 0., 0.]),
             body=True,
             RANGE=100.0):
     directions = directions * RANGE  # avoid mutating the default argument tensor in place
     if len(directions.shape) == 1:
         directions = directions.unsqueeze(0)
     if len(offset.shape) == 1:
         offset = offset.expand(directions.shape[0], -1)
     R = self.get_ori(mat=True)
     pos = self.get_pos()
     if body:
         offset = (R @ offset.T).T
         directions = (R @ directions.T).T
     start = offset + pos
     start = start.expand(directions.shape[0], -1)
     end = directions + start
     rays = p.rayTestBatch(rayFromPositions=start.tolist(),
                           rayToPositions=end.tolist(),
                           physicsClientId=self.sim.id)
     ray_dict = {}
     ray_dict["object"] = [
         self.env.object_dict.get(ray[0], None) for ray in rays
     ]
     ray_dict["pos world"] = torch.stack([
         torch.tensor(ray[3]) - offset[i, :] for i, ray in enumerate(rays)
     ])
     ray_dict["pos"] = (R.T @ ray_dict['pos world'].T -
                        (R.T @ pos)[:, None]).T
     for i, obj in enumerate(ray_dict["object"]):
         if obj is None:
             ray_dict["pos world"][i, :] = torch.zeros(3)
             ray_dict["pos"][i, :] = torch.zeros(3)
     ray_dict["dist"] = ray_dict['pos'].norm(dim=-1)
     return ray_dict
Code example #25
    def _do_ray_tracing_and_publish(self):

        ray_from_position_list = [
            self._position_from_matrix(x[0]) for x in self._laser_rays
        ]
        ray_to_position_list = [
            self._position_from_matrix(x[1]) for x in self._laser_rays
        ]

        # for i in range(len(ray_from_position_list)):
        #     rospy.loginfo("RAY {}: from {} to {}".format(i, ray_from_position_list[i], ray_to_position_list[i]))

        ray_tracing_output = pybullet.rayTestBatch(
            rayFromPositions=ray_from_position_list,
            rayToPositions=ray_to_position_list)

        self._msg.header.stamp = rospy.Time.now()
        self._msg.ranges = []

        for ray in ray_tracing_output:
            self._msg.ranges.append(self._range_min +
                                    (self._range_max - self._range_min) *
                                    ray[2] - 0.01)
        self._laser_scan_publisher.publish(self._msg)
Code example #26
    def step(self, action):
        # Apply control action (120 Hz, each 1st step)
        if (self.stepCounter % 1 == 0 and self.stepCounter != 0):
            self.velocity = action[0]
            self.steeringAngle = action[1]
            self.force = action[2]
            self.act()

        # Update camera data (120 Hz, each 1st step)
        if self.cameraStatus is True:
            if (self.stepCounter % 1 == 0):
                self.snapshot, self.snapshotPath, self.nextSnapshotPath = self.takeSnapshot()

        # Update lidar data (120 Hz, each 1st step)
        if (self.stepCounter % 1 == 0):
            hitFractions = np.zeros(self.numRays, dtype=np.float64)  # hit fractions are floats in [0, 1]
            distances = np.zeros(self.numRays, dtype=np.float64)

            numThreads = 0
            results = p.rayTestBatch(self.rayFrom, self.rayTo, numThreads,
                                     parentObjectUniqueId=self.car, parentLinkIndex=self.lidar_joint)
            for i in range(self.numRays):
                hitObjectUid = results[i][0]
                hitFraction = results[i][2]
                hitPosition = results[i][3]

                hitFractions[i] = hitFraction

                if (hitFraction == 1.):
                    p.addUserDebugLine(self.rayFrom[i], self.rayTo[i], self.rayMissColor, replaceItemUniqueId=self.rayIds[i],
                                       parentObjectUniqueId=self.car, parentLinkIndex=self.lidar_joint)
                    distances[i] = self.rayLen
                else:
                    localHitTo = [self.rayFrom[i][0] + hitFraction*(self.rayTo[i][0] - self.rayFrom[i][0]),
                                  self.rayFrom[i][1] + hitFraction *
                                  (self.rayTo[i][1] - self.rayFrom[i][1]),
                                  self.rayFrom[i][2] + hitFraction*(self.rayTo[i][2] - self.rayFrom[i][2])]
                    p.addUserDebugLine(self.rayFrom[i], localHitTo, self.rayHitColor, replaceItemUniqueId=self.rayIds[i],
                                       parentObjectUniqueId=self.car, parentLinkIndex=self.lidar_joint)
                    distances[i] = np.linalg.norm(
                        np.array(localHitTo, dtype=np.float64) - self.rayFrom[i])

            # self.observation = hitFractions
            distances = distances / self.rayLen
            self.observation = distances

        p.stepSimulation()

        carVelocity = p.getBaseVelocity(self.car)[0] # get linear velocity only
        carSpeed = np.linalg.norm(carVelocity)
        reward = carSpeed*self.dt

        if carSpeed <= 0.3:
            self.stuckCounter += 1
            #print(self.stuckCounter, carSpeed)

        if self.stuckCounter >= self.stopThrehold:
            done = True
        else:
            done = False

        if self.storeData is True:
            datasetRow = [self.snapshotPath, action, reward, int(done), self.nextSnapshotPath]
            self.dataset.writerow(datasetRow)

        # Update step counter
        self.stepCounter += 1

        return self.observation, reward, done, {}
Code example #27
		camTarget = [camPos[0]+forwardVec[0]*10,camPos[1]+forwardVec[1]*10,camPos[2]+forwardVec[2]*10]
		camUpTarget = [camPos[0]+camUpVec[0],camPos[1]+camUpVec[1],camPos[2]+camUpVec[2]]
		viewMat = p.computeViewMatrix(camPos, camTarget, camUpVec)
		projMat = camInfo[3]
		#p.getCameraImage(320,200,viewMatrix=viewMat,projectionMatrix=projMat, flags=p.ER_NO_SEGMENTATION_MASK, renderer=p.ER_BULLET_HARDWARE_OPENGL)
		p.getCameraImage(320,200,viewMatrix=viewMat,projectionMatrix=projMat, renderer=p.ER_BULLET_HARDWARE_OPENGL)
		lastTime=nowTime
	
	nowControlTime = time.time()
	
	nowLidarTime = time.time()
	#lidar update (the 0.3 s threshold below gives roughly 3 Hz)
	if (nowLidarTime-lastLidarTime>.3):
		#print("Lidar!")
		numThreads=0
		results = p.rayTestBatch(rayFrom,rayTo,numThreads, parentObjectUniqueId=car, parentLinkIndex=hokuyo_joint)
		for i in range (numRays):
			hitObjectUid=results[i][0]
			hitFraction = results[i][2]
			hitPosition = results[i][3]
			if (hitFraction==1.):
				p.addUserDebugLine(rayFrom[i],rayTo[i], rayMissColor,replaceItemUniqueId=rayIds[i],parentObjectUniqueId=car, parentLinkIndex=hokuyo_joint)
			else:
				localHitTo = [rayFrom[i][0]+hitFraction*(rayTo[i][0]-rayFrom[i][0]),
											rayFrom[i][1]+hitFraction*(rayTo[i][1]-rayFrom[i][1]),
											rayFrom[i][2]+hitFraction*(rayTo[i][2]-rayFrom[i][2])]
				p.addUserDebugLine(rayFrom[i],localHitTo, rayHitColor,replaceItemUniqueId=rayIds[i],parentObjectUniqueId=car, parentLinkIndex=hokuyo_joint)
		lastLidarTime = nowLidarTime
		
	#control at 100Hz
	if (nowControlTime-lastControlTime>.01):
Code example #28
File: batchRayCast.py Project: CGTGPY3G1/bullet3
	if (replaceLines):
		rayIds.append(p.addUserDebugLine(rayFrom[i], rayTo[i], rayMissColor))
	else:
		rayIds.append(-1)

if (not useGui):
	timingLog = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS,"rayCastBench.json")

numSteps = 10
if (useGui):
	numSteps = 327680

for i in range (numSteps):
	p.stepSimulation()
	for j in range (8):
		results = p.rayTestBatch(rayFrom,rayTo,j+1)
	
	#for i in range (10):
	#	p.removeAllUserDebugItems()
	if (useGui):
		if (not replaceLines):
			p.removeAllUserDebugItems()
			
		for i in range (numRays):
			hitObjectUid=results[i][0]
			
			
Code example #29
File: locomotor_env.py Project: rrwiyatn/GibsonEnvV2
    def get_state(self, collision_links=[]):
        # calculate state
        # sensor_state = self.robots[0].calc_state()
        # sensor_state = np.concatenate((sensor_state, self.get_additional_states()))
        sensor_state = self.get_additional_states()

        state = OrderedDict()
        if 'sensor' in self.output:
            state['sensor'] = sensor_state
        if 'pointgoal' in self.output:
            state['pointgoal'] = sensor_state[:2]
        if 'rgb' in self.output:
            state['rgb'] = self.simulator.renderer.render_robot_cameras(
                modes=('rgb'))[0][:, :, :3]
        if 'depth' in self.output:
            depth = -self.simulator.renderer.render_robot_cameras(
                modes=('3d'))[0][:, :, 2:3]
            state['depth'] = depth
        if 'normal' in self.output:
            state['normal'] = self.simulator.renderer.render_robot_cameras(
                modes='normal')
        if 'seg' in self.output:
            state['seg'] = self.simulator.renderer.render_robot_cameras(
                modes='seg')
        if 'rgb_filled' in self.output:
            with torch.no_grad():
                tensor = transforms.ToTensor()(
                    (state['rgb'] * 255).astype(np.uint8)).cuda()
                rgb_filled = self.comp(tensor[None, :, :, :])[0].permute(
                    1, 2, 0).cpu().numpy()
                state['rgb_filled'] = rgb_filled
        if 'bump' in self.output:
            state[
                'bump'] = -1 in collision_links  # check collision for baselink, it might vary for different robots

        if 'pointgoal' in self.output:
            state['pointgoal'] = sensor_state[:2]

        if 'scan' in self.output:
            assert 'scan_link' in self.robots[
                0].parts, "Requested scan but no scan_link"
            pose_camera = self.robots[0].parts['scan_link'].get_pose()
            n_rays_per_horizontal = 128  # Number of rays along one horizontal scan/slice

            n_vertical_beams = 9
            angle = np.arange(0, 2 * np.pi,
                              2 * np.pi / float(n_rays_per_horizontal))
            elev_bottom_angle = -30. * np.pi / 180.
            elev_top_angle = 10. * np.pi / 180.
            elev_angle = np.arange(elev_bottom_angle, elev_top_angle,
                                   (elev_top_angle - elev_bottom_angle) /
                                   float(n_vertical_beams))
            orig_offset = np.vstack([
                np.vstack([
                    np.cos(angle),
                    np.sin(angle),
                    np.repeat(np.tan(elev_ang), angle.shape)
                ]).T for elev_ang in elev_angle
            ])
            transform_matrix = quat2mat([
                pose_camera[-1], pose_camera[3], pose_camera[4], pose_camera[5]
            ])
            offset = orig_offset.dot(np.linalg.inv(transform_matrix))
            pose_camera = pose_camera[None, :3].repeat(n_rays_per_horizontal *
                                                       n_vertical_beams,
                                                       axis=0)

            results = p.rayTestBatch(pose_camera, pose_camera + offset * 30)
            hit = np.array([item[0] for item in results])
            dist = np.array([item[2] for item in results])
            dist[dist >= 1 - 1e-5] = np.nan
            dist[dist < 0.1 / 30] = np.nan

            dist[hit == self.robots[0].robot_ids[0]] = np.nan
            dist[hit == -1] = np.nan
            dist *= 30

            xyz = dist[:, np.newaxis] * orig_offset
            xyz = xyz[np.equal(np.isnan(xyz), False)]  # Remove nans
            #print(xyz.shape)
            xyz = xyz.reshape(xyz.shape[0] // 3, -1)
            state['scan'] = xyz

        return state
Code example #30
			matrix = p.getMatrixFromQuaternion(orientation)
			matrix = np.reshape(matrix, (3, 3))

			src = np.array(position) 
			src = src + np.matmul(matrix,offset)

			path.calcDist(src)
			h = 10

			rays_src = np.repeat([src], num_rays, axis=0)

			orn = np.matmul(matrix, rot) #rotates unit vector y to -x

			rays_end = np.matmul(orn, rays) # unit vector in direction of minitaur
			rays_end = (rays_end + src[:, None]).T
			rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())

			b = np.asarray([int(i[0]) for i in rays_info])

			for i in range(h-1):

				rays = np.concatenate(([ray_length*np.sin(angles)], [ray_length*np.cos(angles)], [np.full((num_rays,), i+1)]), axis=0)

				rays_end = np.matmul(orn, rays) # unit vector in direction of minitaur
				rays_end = (rays_end + src[:, None]).T

				rays_info = p.rayTestBatch(rays_src.tolist(), rays_end.tolist())

				b = np.vstack((b, np.asarray([int(i[0]) for i in rays_info])))

			nth_ray = find_largest_gap(b)
Code example #31
    def step(self):
        quadruped = self.quadruped
        bodyPos, bodyOrn = p.getBasePositionAndOrientation(quadruped)
        linearVel, angularVel = p.getBaseVelocity(quadruped)
        bodyEuler = p.getEulerFromQuaternion(bodyOrn)
        kp = p.readUserDebugParameter(self.IDkp)
        kd = p.readUserDebugParameter(self.IDkd)
        maxForce = p.readUserDebugParameter(self.IDmaxForce)

        self.handleCamera(bodyPos, bodyOrn)
        self.addInfoText(bodyPos, bodyEuler, linearVel, angularVel)

        if self.checkSimulationReset(bodyOrn):
            return False

        # Calculate Angles with the input of FeetPos,BodyRotation and BodyPosition
        angles = self.kin.calcIK(self.Lp, self.rot, self.pos)

        for lx, leg in enumerate(
            ['front_left', 'front_right', 'rear_left', 'rear_right']):
            for px, part in enumerate(['shoulder', 'leg', 'foot']):
                j = self.jointNameToId[leg + "_" + part]
                p.setJointMotorControl2(bodyIndex=quadruped,
                                        jointIndex=j,
                                        controlMode=p.POSITION_CONTROL,
                                        targetPosition=angles[lx][px] *
                                        self.dirs[lx][px],
                                        positionGain=kp,
                                        velocityGain=kd,
                                        force=maxForce)

        nowLidarTime = time.time()
        if (nowLidarTime - self.lastLidarTime > .2):
            numThreads = 0
            results = p.rayTestBatch(self.rayFrom,
                                     self.rayTo,
                                     numThreads,
                                     parentObjectUniqueId=quadruped,
                                     parentLinkIndex=0)
            for i in range(self.numRays):
                hitObjectUid = results[i][0]
                hitFraction = results[i][2]
                hitPosition = results[i][3]
                if (hitFraction == 1.):
                    if self.debugLidar:
                        p.addUserDebugLine(self.rayFrom[i],
                                           self.rayTo[i],
                                           self.rayMissColor,
                                           replaceItemUniqueId=self.rayIds[i],
                                           parentObjectUniqueId=quadruped,
                                           parentLinkIndex=0)
                else:
                    localHitTo = [
                        self.rayFrom[i][0] + hitFraction *
                        (self.rayTo[i][0] - self.rayFrom[i][0]),
                        self.rayFrom[i][1] + hitFraction *
                        (self.rayTo[i][1] - self.rayFrom[i][1]),
                        self.rayFrom[i][2] + hitFraction *
                        (self.rayTo[i][2] - self.rayFrom[i][2])
                    ]
                    dis = math.sqrt(localHitTo[0]**2 + localHitTo[1]**2 +
                                    localHitTo[2]**2)
                    if self.debugLidar:
                        p.addUserDebugLine(self.rayFrom[i],
                                           localHitTo,
                                           self.rayHitColor,
                                           replaceItemUniqueId=self.rayIds[i],
                                           parentObjectUniqueId=quadruped,
                                           parentLinkIndex=0)
            self.lastLidarTime = nowLidarTime  # update the instance attribute checked above, not a local

        # let the Simulator do the rest
        if (self.useRealTime):
            self.t = time.time() - self.ref_time
        else:
            self.t = self.t + self.fixedTimeStep
        if (self.useRealTime == False):
            p.stepSimulation()
            time.sleep(self.fixedTimeStep)